From 3bf696d6276a683542ed99ba9b81c3777d4a01df Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Tue, 7 May 2024 08:31:22 -0400 Subject: [PATCH 01/78] docs: Document Event Update struct (#2598) ## Relevant issue(s) Resolves #2597 ## Description Documents the Event Update struct. --- events/db_update.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/events/db_update.go b/events/db_update.go index a6865b8707..1d802d3e3a 100644 --- a/events/db_update.go +++ b/events/db_update.go @@ -22,12 +22,24 @@ type UpdateChannel = immutable.Option[Channel[Update]] // EmptyUpdateChannel is an empty UpdateChannel. var EmptyUpdateChannel = immutable.None[Channel[Update]]() -// UpdateEvent represents a new DAG node added to the append-only MerkleCRDT Clock graph -// of a document or sub-field. +// Update represents a new DAG node added to the append-only composite MerkleCRDT Clock graph +// of a document. +// +// It must only contain public elements not protected by ACP. type Update struct { - DocID string - Cid cid.Cid + // DocID is the unique immutable identifier of the document that was updated. + DocID string + + // Cid is the id of the composite commit that formed this update in the DAG. + Cid cid.Cid + + // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. SchemaRoot string - Block ipld.Node - Priority uint64 + + // Block is the contents of this composite commit, it contains the Cids of the field level commits that + // also formed this update. + Block ipld.Node + + // Priority is used to determine the order in which concurrent updates are applied. 
+ Priority uint64 } From 81183f5b5835d4f9f326c536cb081f8fcb74a86e Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Tue, 7 May 2024 10:34:05 -0400 Subject: [PATCH 02/78] docs(i): Import docs from doc repo (#2596) ## Relevant issue(s) Resolves #2595 ## Description Imports the docs from doc repo (https://github.com/sourcenetwork/docs.source.network/tree/master/docs/defradb) and adds the 0.10.0 and 0.11.0 change logs. --- docs/website/BSL-License.md | 80 ++++ docs/website/README.md | 5 + docs/website/concepts/_category_.json | 5 + docs/website/concepts/ipfs.md | 31 ++ docs/website/concepts/libp2p.md | 24 ++ docs/website/getting-started.md | 353 +++++++++++++++++ docs/website/guides/_category_.json | 5 + docs/website/guides/akash-deployment.md | 145 +++++++ docs/website/guides/deployment-guide.md | 156 ++++++++ docs/website/guides/explain-systems.md | 171 +++++++++ docs/website/guides/merkle-crdt.md | 67 ++++ docs/website/guides/peer-to-peer.md | 144 +++++++ docs/website/guides/schema-migration.md | 286 ++++++++++++++ docs/website/guides/schema-relationship.md | 356 ++++++++++++++++++ docs/website/guides/time-traveling-queries.md | 60 +++ docs/website/references/_category_.json | 5 + docs/website/references/cli/_category_.json | 4 + docs/website/references/cli/defradb.md | 36 ++ docs/website/references/cli/defradb_client.md | 39 ++ .../references/cli/defradb_client_blocks.md | 28 ++ .../cli/defradb_client_blocks_get.md | 31 ++ .../references/cli/defradb_client_dump.md | 31 ++ .../references/cli/defradb_client_peerid.md | 31 ++ .../references/cli/defradb_client_ping.md | 31 ++ .../references/cli/defradb_client_query.md | 46 +++ .../references/cli/defradb_client_rpc.md | 34 ++ .../cli/defradb_client_rpc_addreplicator.md | 37 ++ .../cli/defradb_client_rpc_p2pcollection.md | 35 ++ .../defradb_client_rpc_p2pcollection_add.md | 36 ++ ...defradb_client_rpc_p2pcollection_getall.md | 36 ++ ...defradb_client_rpc_p2pcollection_remove.md | 36 ++ 
.../cli/defradb_client_rpc_replicator.md | 37 ++ .../defradb_client_rpc_replicator_delete.md | 37 ++ .../defradb_client_rpc_replicator_getall.md | 37 ++ .../cli/defradb_client_rpc_replicator_set.md | 39 ++ .../references/cli/defradb_client_schema.md | 33 ++ .../cli/defradb_client_schema_add.md | 47 +++ .../cli/defradb_client_schema_patch.md | 49 +++ docs/website/references/cli/defradb_init.md | 36 ++ .../references/cli/defradb_server-dump.md | 32 ++ docs/website/references/cli/defradb_start.md | 46 +++ .../website/references/cli/defradb_version.md | 33 ++ .../query-specification/_category_.json | 4 + .../aggregate-functions.md | 47 +++ .../references/query-specification/aliases.md | 51 +++ .../query-specification/collections.md | 9 + .../query-specification/database-api.md | 113 ++++++ .../query-specification/execution-flow.md | 43 +++ .../query-specification/filtering.md | 213 +++++++++++ .../query-specification/grouping.md | 83 ++++ .../limiting-and-pagination.md | 33 ++ .../query-specification/mutation-block.md | 156 ++++++++ .../query-specification/query-block.md | 9 + .../query-language-overview.md | 19 + .../query-specification/relationships.md | 89 +++++ .../sorting-and-ordering.md | 136 +++++++ docs/website/release notes/_category_.json | 5 + docs/website/release notes/v0.10.0.md | 45 +++ docs/website/release notes/v0.11.0.md | 41 ++ docs/website/release notes/v0.2.0.md | 86 +++++ docs/website/release notes/v0.2.1.md | 57 +++ docs/website/release notes/v0.3.0.md | 178 +++++++++ docs/website/release notes/v0.3.1.md | 94 +++++ docs/website/release notes/v0.4.0.md | 80 ++++ docs/website/release notes/v0.5.0.md | 144 +++++++ docs/website/release notes/v0.5.1.md | 91 +++++ docs/website/release notes/v0.6.0.md | 85 +++++ docs/website/release notes/v0.7.0.md | 74 ++++ docs/website/release notes/v0.8.0.md | 75 ++++ docs/website/release notes/v0.9.0.md | 78 ++++ 70 files changed, 4948 insertions(+) create mode 100644 docs/website/BSL-License.md create mode 100644 
docs/website/README.md create mode 100644 docs/website/concepts/_category_.json create mode 100644 docs/website/concepts/ipfs.md create mode 100644 docs/website/concepts/libp2p.md create mode 100644 docs/website/getting-started.md create mode 100644 docs/website/guides/_category_.json create mode 100644 docs/website/guides/akash-deployment.md create mode 100644 docs/website/guides/deployment-guide.md create mode 100644 docs/website/guides/explain-systems.md create mode 100644 docs/website/guides/merkle-crdt.md create mode 100644 docs/website/guides/peer-to-peer.md create mode 100644 docs/website/guides/schema-migration.md create mode 100644 docs/website/guides/schema-relationship.md create mode 100644 docs/website/guides/time-traveling-queries.md create mode 100644 docs/website/references/_category_.json create mode 100644 docs/website/references/cli/_category_.json create mode 100644 docs/website/references/cli/defradb.md create mode 100644 docs/website/references/cli/defradb_client.md create mode 100644 docs/website/references/cli/defradb_client_blocks.md create mode 100644 docs/website/references/cli/defradb_client_blocks_get.md create mode 100644 docs/website/references/cli/defradb_client_dump.md create mode 100644 docs/website/references/cli/defradb_client_peerid.md create mode 100644 docs/website/references/cli/defradb_client_ping.md create mode 100644 docs/website/references/cli/defradb_client_query.md create mode 100644 docs/website/references/cli/defradb_client_rpc.md create mode 100644 docs/website/references/cli/defradb_client_rpc_addreplicator.md create mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection.md create mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md create mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md create mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md create mode 100644 
docs/website/references/cli/defradb_client_rpc_replicator.md create mode 100644 docs/website/references/cli/defradb_client_rpc_replicator_delete.md create mode 100644 docs/website/references/cli/defradb_client_rpc_replicator_getall.md create mode 100644 docs/website/references/cli/defradb_client_rpc_replicator_set.md create mode 100644 docs/website/references/cli/defradb_client_schema.md create mode 100644 docs/website/references/cli/defradb_client_schema_add.md create mode 100644 docs/website/references/cli/defradb_client_schema_patch.md create mode 100644 docs/website/references/cli/defradb_init.md create mode 100644 docs/website/references/cli/defradb_server-dump.md create mode 100644 docs/website/references/cli/defradb_start.md create mode 100644 docs/website/references/cli/defradb_version.md create mode 100644 docs/website/references/query-specification/_category_.json create mode 100644 docs/website/references/query-specification/aggregate-functions.md create mode 100644 docs/website/references/query-specification/aliases.md create mode 100644 docs/website/references/query-specification/collections.md create mode 100644 docs/website/references/query-specification/database-api.md create mode 100644 docs/website/references/query-specification/execution-flow.md create mode 100644 docs/website/references/query-specification/filtering.md create mode 100644 docs/website/references/query-specification/grouping.md create mode 100644 docs/website/references/query-specification/limiting-and-pagination.md create mode 100644 docs/website/references/query-specification/mutation-block.md create mode 100644 docs/website/references/query-specification/query-block.md create mode 100644 docs/website/references/query-specification/query-language-overview.md create mode 100644 docs/website/references/query-specification/relationships.md create mode 100644 docs/website/references/query-specification/sorting-and-ordering.md create mode 100644 docs/website/release 
notes/_category_.json create mode 100644 docs/website/release notes/v0.10.0.md create mode 100644 docs/website/release notes/v0.11.0.md create mode 100644 docs/website/release notes/v0.2.0.md create mode 100644 docs/website/release notes/v0.2.1.md create mode 100644 docs/website/release notes/v0.3.0.md create mode 100644 docs/website/release notes/v0.3.1.md create mode 100644 docs/website/release notes/v0.4.0.md create mode 100644 docs/website/release notes/v0.5.0.md create mode 100644 docs/website/release notes/v0.5.1.md create mode 100644 docs/website/release notes/v0.6.0.md create mode 100644 docs/website/release notes/v0.7.0.md create mode 100644 docs/website/release notes/v0.8.0.md create mode 100644 docs/website/release notes/v0.9.0.md diff --git a/docs/website/BSL-License.md b/docs/website/BSL-License.md new file mode 100644 index 0000000000..a5018a4ef8 --- /dev/null +++ b/docs/website/BSL-License.md @@ -0,0 +1,80 @@ +--- +sidebar_position: 7 +title: BSL 1.1 License +--- + +We are pleased to announce that Source is transitioning its source-code license from Apache 2.0 to the Business Source License version (BSL 1.1). This strategic move empowers Source to exercise greater control over its source code's commercialization while providing open access to the community. + +The timing for this shift is perfect because our latest release brings revolutionary design changes. At this stage of development, it is typical for some commercial users to fork the software for commercial gains. This move is not beneficial to our community. Moreover, it can result in forks jungle, brand dilution, confused users, and code fragmentation. Thus, we must make this change right now to protect our community and developers. + +As you know, DefraDB is a community-driven and developer-centric project. We strongly value our community's active participation and feedback. 
We genuinely seek your valuable insights to explore and expand the possibilities of the Additional Use Grant in alignment with our shared vision. Together, we can shape the future of DefraDB to serve the needs of our collective community better. + +At our mission's core, we are steadfast in promoting fairness and social good. Similarly, security, interpretability, user data portability, privacy, democratization, and the long-lasting vitality of the network is of prime importance to us. +. + +## BSL Overview + +Here is a brief BSL introduction and key points of our licensing terms. + +### Brief History of BSL +The founders of MySQL and MariaDB introduced BSL. They implemented it first in 2013 for MariaDB products. In 2017, they updated the license and further refined it to version 1.1, benefiting from valuable insights and guidance from Bruce Perens, the co-founder of the Open-Source Initiative (OSI). + +### Other Open-Source Companies Using Commercial Licenses + +It's worth mentioning that providing some sort of commercial licensing is gaining attention and adoption in the open-source community. Today, many notable companies use these licenses for their specific software products. For example, MariaDB, WP Engine (WordPress), MySQL AB (MySQL), Canonical (Ubuntu), Mozilla Corporation (Firefox), Docker, SUSE (Linux Kernel), GitLab (Git), JetBrains (IntelliJ), GitHub (Git), Red Hat (Linux), and Redis Labs are among them. + +### What Are the provisions of BSL? + +The license design provides a balance between open-source principles and commercial interests. It allows companies to maintain control over the commercialization of their source code while providing necessary access to the community. + +## Key Points of BSL 1.1 +* The standard term for the BSL is four years, providing time for DefraDB to gain support and stability before broader adoption. +* The standard BSL allows copying, modifying, redistributing, and non-production use of the source code. 
+ +### Production Use Criteria +As you can see, the non-production use of the source code does not benefit the community's financial sustainability. Thus, to allow running DefraDB in production, we created the following four criteria under the Additional Use Grant Conditions. In summary, the production use is allowed if: +1. The application is connected to the Source Hub Mainnet, with ongoing support through the OPEN utility token. +2. The application is connected to any sidechain protocol integrated with the Source Hub Mainnet. +3. The applications use the Source Network Access Control Policy on any public protocols. +4. The project is a non-profit project. +If none of the above criteria apply to your use case, you can obtain commercial licensing by contacting Source. + +### How Will This Change Benefit DefraDB? + +BSL offers compelling advantages for us. +1. It allows us to maintain a balance between openness and commercial viability. +2. By providing a time-limited license that reverts to open-source after a specified period, BSL allows us to benefit from community collaboration. +3. It also safeguards our ability to monetize and protect our IP. It balances source code accessibility and commercial interests. +4. It also enables transparency and trust while preserving proprietary innovations. The defined roadmap for licensing transitions provides predictability, aligning development efforts and business objectives. Endorsement by influential figures in the open-source community enhances credibility and reputation. +5. Adopting BSL demonstrates our commitment to collaboration and innovation. It will attract developers and foster partnerships. Overall, BSL combines the best elements of open source with safeguards for commercial success. It will empower DefraDB to thrive, innovate, and position itself as a leader in the open-source ecosystem. + +### How Will This Change Benefit Our Community? + +This move also brings compelling benefits to our open-source community. 
It can create a thriving environment for collaboration and innovation. Here's why BSL is advantageous to our community: +1. Source Code Access: BSL ensures transparency by making the source code readily available. It fosters trust and empowers developers to understand, enhance, and improve the software. +2. Collaborative Community: BSL encourages active participation. It enables developers to contribute their expertise, ideas, and enhancements. This collective effort drives continuous innovation and creates a supportive network within the community. +3. Increased Adoption: BSL's balanced licensing structure will promote wider adoption of DefraDB. Aligning openness with commercial viability attracts organizations to embrace and distribute the software, expanding its reach and impact. +4. Protection of Open-Source Values: BSL maintains the core principles of open source. At the same time, it also acknowledges the need for sustainable commercial models. It strikes a crucial balance that safeguards developers' and companies' interests. +By choosing BSL, we can contribute to the growth and advancement of our project. Thus, it will enable our vibrant community to thrive more. + +## Frequently Asked Questions + +### Q: What Distinguishes the BSL From Other Licenses Such As AGPL, SSPL, Or the Inclusion of The Common Clause in Agreements? +BSL 1.1 stands apart due to its unique characteristic as a time-limited license. It reverts to an open-source license (specifically Apache) after four years. Conversely, the other mentioned options impose permanent restrictions on specific usage scenarios. Our approach aims to strike an optimal balance between ensuring the availability of our source code and supporting the developer community. It also safeguards our capacity to commercialize and provide support for it. + +### Q: Why Make the Change Now? +Our current release is a revolutionary one. 
It introduces captivating design changes that could lead to third-party forks of the Source code. It allows them to create commercial derivatives without contributing to the developer community. In the best interest of the community and our customers, we believe it is crucial to avoid such an outcome. +### Q: After Four Years, Can I Develop My Own Commercial Product Under the Apache 2.0 license? +Absolutely, if that is your intention. + +### Q: Is Source Still Committed to Being an Open-Source Company? +Absolutely, yes. We continue to license a significant portion of our source code under approved open-source licenses for specific uses. We remain dedicated to servicing various open-source projects related to Source. Furthermore, the BSL only imposes restrictions on the commercialization of our source code. Lastly, after four years, the source code automatically converts to an OSI-approved license (Apache 2.0), further solidifying our commitment to open-source principles. + +### Q: Which of Your Products Will Be Under The BSL? +Currently, BSL 1.1 is only for DefraDB. However, if, in the future, we need to update the licensing of any of our other projects, we will inform the community accordingly. + +### Q: Can I Use DefraDB Licensed Under BSL In a Test and Development Environment? +Yes, you can use DefraDB licensed under BSL in a non-production test and development environment without requiring a subscription from us. + +### Q: Can I Apply BSL To My Own Software? +Yes, you can utilize the BSL framework for your software if you hold the copyright or if the software has a permissive license, such as BSD. Converting your software to BSL involves adding the BSL header to all software files and including the BSL license file in your software distribution. You also need to specify an Additional Use Grant (or declare its absence) and a Change Date suitable for your software in the header of the BSL license file. 
diff --git a/docs/website/README.md b/docs/website/README.md new file mode 100644 index 0000000000..f02723dc69 --- /dev/null +++ b/docs/website/README.md @@ -0,0 +1,5 @@ +# Website documentation + +This directory contains the documentation that is displayed on our documentation website https://docs.source.network/. + +The structure of this directory and its children should match that of the website. diff --git a/docs/website/concepts/_category_.json b/docs/website/concepts/_category_.json new file mode 100644 index 0000000000..fccb23e53b --- /dev/null +++ b/docs/website/concepts/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Concepts", + "position": 3 + } + \ No newline at end of file diff --git a/docs/website/concepts/ipfs.md b/docs/website/concepts/ipfs.md new file mode 100644 index 0000000000..5d643d3c61 --- /dev/null +++ b/docs/website/concepts/ipfs.md @@ -0,0 +1,31 @@ +--- +title: IPFS +--- + +## Overview + +IPFS is a decentralized system to access websites, applications, files, and data using content addressing. IPFS stands for **InterPlanetary File System**. The fundamental idea underlying this technology is to change the way a network of people and computers can exchange information amongst themselves. + +## Key Features + +- Distributed/decentralized system +- Uses content addressing +- Participation + +A decentralized system lets you access information or a file from multiple locations, which aren't managed by a single organization. The pros of decentralization are: access to multiple locations to access data, easy to dodge content censorship, and faster file transfer. + +IPFS addresses a file by its content instead of its location. A content identifier is the cryptographic hash of the content at that address. It is unique to the content it came in from and permits you to verify if you got what you had requested. + +For IPFS to work well, active participation of people is necessary. 
If you are sharing files using IPFS, you need to have copies of the shared files available on multiple computers, which are powered on and running IPFS. In a nutshell, many people provide access to each other's files and participate in making them available when requested. Note that if you have downloaded a file using IPFS, by default your computer will share it further with other participants. + +## How Does it Work? + +As discussed earlier, IPFS is a p2p (peer-to-peer) storage network. The IPFS ecosystem works with the following fundamental principles. + +1. Unique identification via content addressing +2. Content linking via directed acyclic graphs (DAGs) +3. Content discovery via distributed hash tables (DHTs) + +## Suggested Reading + +For more in-depth knowledge of the IPFS system refer to the [IPFS Conceptual documentation](https://docs.ipfs.io/concepts/). diff --git a/docs/website/concepts/libp2p.md b/docs/website/concepts/libp2p.md new file mode 100644 index 0000000000..ace7366fa5 --- /dev/null +++ b/docs/website/concepts/libp2p.md @@ -0,0 +1,24 @@ +# libp2p +## Overview + +libp2p is a modular system which helps in the development of peer-to-peer network applications. The system comprises protocols, specifications, and libraries. + +## What is Peer-to-peer? + +Most commonly used peer-to-peer applications include file sharing networks like bittorrent (used to download movies, files) and the recent uptrend of blockchain networks. Both these network types communicate in a peer-to-peer method. + +In a p2p network, participants (also known as nodes or peers) communicate with each other directly rather than using a **server** like the client/server model of data transfer. 
+ +# Problems Solved by libp2p + +Of the many problems, the major ones which libp2p addresses include: +- Transport +- Identity +- Security +- Peer Routing +- Content Routing +- Messaging/PubSub + +## Suggested Reading + +For more in-depth knowledge of the libp2p system refer to the [libp2p Conceptual documentation](https://docs.libp2p.io/concepts/). \ No newline at end of file diff --git a/docs/website/getting-started.md b/docs/website/getting-started.md new file mode 100644 index 0000000000..739f27b893 --- /dev/null +++ b/docs/website/getting-started.md @@ -0,0 +1,353 @@ +--- +sidebar_position: 1 +title: Getting Started +slug: / +--- + +DefraDB is a user-centric database that prioritizes data ownership, personal privacy, and information security. Its data model, powered by the convergence of [MerkleCRDTs](https://arxiv.org/pdf/2004.00107.pdf) and the content-addressability of [IPLD](https://docs.ipld.io/), enables a multi-write-master architecture. It features [DQL](./references/query-specification/query-language-overview.md), a query language compatible with GraphQL but providing extra convenience. By leveraging peer-to-peer networking it can be deployed nimbly in novel topologies. Access control is determined by a relationship-based DSL, supporting document or field-level policies, secured by the SourceHub network. DefraDB is a core part of the [Source technologies](https://source.network/) that enable new paradigms of decentralized data and access-control management, user-centric apps, data trustworthiness, and much more. + +DISCLAIMER: At this early stage, DefraDB does not offer access control or data encryption, and the default configuration exposes the database to the network. The software is provided "as is" and is not guaranteed to be stable, secure, or error-free. We encourage you to experiment with DefraDB and provide feedback, but please do not use it for production purposes until it has been thoroughly tested and developed. 
+ +## Install + +Install `defradb` by [downloading an executable](https://github.com/sourcenetwork/defradb/releases) or building it locally using the [Go toolchain](https://golang.org/): + +```shell +git clone git@github.com:sourcenetwork/defradb.git +cd defradb +make install +``` + +In the following sections, we assume that `defradb` is included in your `PATH`. If you installed it with the Go toolchain, use: + +```shell +export PATH=$PATH:$(go env GOPATH)/bin +``` + +We recommend experimenting with queries using a native GraphQL client. Altair is a popular option - [download and install it](https://altairgraphql.dev/#download). + +## Start + +Start a node by executing `defradb start`. Keep the node running while going through the following examples. + +Verify the local connection to the node works by executing `defradb client ping` in another terminal. + +## Configuration + +In this document, we use the default configuration, which has the following behavior: + +- `~/.defradb/` is DefraDB's configuration and data directory +- `client` command interacts with the locally running node +- The GraphQL endpoint is provided at + +The GraphQL endpoint can be used with a GraphQL client (e.g., Altair) to conveniently perform requests (`query`, `mutation`) and obtain schema introspection. + +## Add a schema type + +Schemas are used to structure documents using a type system. + +In the following examples, we'll be using a simple `User` schema type. + +Add it to the database with the following command. By doing so, DefraDB generates the typed GraphQL endpoints for querying, mutation, and introspection. + +```shell +defradb client schema add ' + type User { + name: String + age: Int + verified: Boolean + points: Float + } +' +``` + +Find more examples of schema type definitions in the [examples/schema/](https://github.com/sourcenetwork/defradb/examples/schema/) folder. 
+ +## Create a document + +Submit a `mutation` request to create a document of the `User` type: + +```shell +defradb client query ' + mutation { + create_User(input: {age: 31, verified: true, points: 90, name: "Bob"}) { + _key + } + } +' +``` + +Expected response: + +```json +{ + "data": [ + { + "_key": "bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab", + } + ] +} +``` + +`_key` is the document's key, a unique identifier of the document, determined by its schema and initial data. + +## Query documents + +Once you have populated your node with data, you can query it: + +```shell +defradb client query ' + query { + User { + _key + age + name + points + } + } +' +``` + +This query obtains *all* users and returns their fields `_key, age, name, points`. GraphQL queries only return the exact fields requested. + +You can further filter results with the `filter` argument. + +```shell +defradb client query ' + query { + User(filter: {points: {_ge: 50}}) { + _key + age + name + points + } + } +' +``` + +This returns only user documents which have a value for the `points` field *Greater Than or Equal to* (`_ge`) 50. + +## Obtain document commits + +DefraDB's data model is based on [MerkleCRDTs](./guides/merkle-crdt.md). Each document has a graph of all of its updates, similar to Git. The updates are called `commits` and are identified by `cid`, a content identifier. Each references its parents by their `cid`s. + +To get the most recent commits in the MerkleDAG for the document identified as `bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab`: + +```shell +defradb client query ' + query { + latestCommits(dockey: "bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab") { + cid + delta + height + links { + cid + name + } + } + } +' +``` + +It returns a structure similar to the following, which contains the update payload that caused this new commit (`delta`) and any subgraph commits it references. 
+ +```json +{ + "data": [ + { + "cid": "bafybeifhtfs6vgu7cwbhkojneh7gghwwinh5xzmf7nqkqqdebw5rqino7u", + "delta": "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=", + "height": 1, + "links": [ + { + "cid": "bafybeiet6foxcipesjurdqi4zpsgsiok5znqgw4oa5poef6qtiby5hlpzy", + "name": "age" + }, + { + "cid": "bafybeielahxy3r3ulykwoi5qalvkluojta4jlg6eyxvt7lbon3yd6ignby", + "name": "name" + }, + { + "cid": "bafybeia3tkpz52s3nx4uqadbm7t5tir6gagkvjkgipmxs2xcyzlkf4y4dm", + "name": "points" + }, + { + "cid": "bafybeia4off4javopmxcdyvr6fgb5clo7m5bblxic5sqr2vd52s6khyksm", + "name": "verified" + } + ] + } + ] +} +``` + +Obtain a specific commit by its content identifier (`cid`): + +```shell +defradb client query ' + query { + commits(cid: "bafybeifhtfs6vgu7cwbhkojneh7gghwwinh5xzmf7nqkqqdebw5rqino7u") { + cid + delta + height + links { + cid + name + } + } + } +' +``` + +## DefraDB Query Language (DQL) + +DQL is compatible with GraphQL but features various extensions. + +Read the [Query specification](./references/query-specification/query-language-overview.md) to discover filtering, ordering, limiting, relationships, variables, aggregate functions, and other useful features. + + +## Peer-to-peer data synchronization + +DefraDB leverages peer-to-peer networking for data exchange, synchronization, and replication of documents and commits. + +When starting a node for the first time, a key pair is generated and stored in its "root directory" (`~/.defradb/` by default). + +Each node has a unique `Peer ID` generated from its public key. This ID allows other nodes to connect to it. + +There are two types of peer-to-peer relationships supported: **pubsub** peering and **replicator** peering. + +Pubsub peering *passively* synchronizes data between nodes by broadcasting *Document Commit* updates to the topic of the commit's document key. Nodes need to be listening on the pubsub channel to receive updates. 
This is for when two nodes *already* share a document and want to keep it in sync. + +Replicator peering *actively* pushes changes from a specific collection *to* a target peer. + +### Pubsub example + +Pubsub peers can be specified on the command line using the `--peers` flag, which accepts a comma-separated list of peer [multiaddresses](https://docs.libp2p.io/concepts/addressing/). For example, a node at IP `192.168.1.12` listening on 9000 with Peer ID `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B` would be referred to using the multiaddress `/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`. + +Let's go through an example of two nodes (*nodeA* and *nodeB*) connecting with each other over pubsub, on the same machine. + +Start *nodeA* with a default configuration: + +```shell +defradb start +``` + +Obtain the Peer ID from its console output. In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different. + +For *nodeB*, we provide the following configuration: + +```shell +defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --tcpaddr /ip4/0.0.0.0/tcp/9162 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B +``` + +About the flags: + +- `--rootdir` specifies the root dir (config and data) to use +- `--url` is the address to listen on for the client HTTP and GraphQL API +- `--p2paddr` is the multiaddress for the p2p networking to listen on +- `--tcpaddr` is the multiaddress for the gRPC server to listen on +- `--peers` is a comma-separated list of peer multiaddresses + +This starts two nodes and connects them via pubsub networking. + +### Collection subscription example + +It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `schemaVersionID` in one of its documents. 
Here we use the collection ID of the `User` type we created above. After setting up 2 nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collections updates on *nodeA* from *nodeB* by using the `rpc p2pcollection` command: + +```shell +defradb client rpc p2pcollection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske +``` + +Multiple collection IDs can be added at once. + +```shell +defradb client rpc p2pcollection add --url localhost:9182 +``` + +### Replicator example + +Replicator peering is targeted: it allows a node to actively send updates to another node. Let's go through an example of *nodeA* actively replicating to *nodeB*: + +Start *nodeA*: + +```shell +defradb start +``` + +In another terminal, add this example schema to it: + +```shell +defradb client schema add ' + type Article { + content: String + published: Boolean + } +' +``` + +Start (or continue running from above) *nodeB*, that will be receiving updates: + +```shell +defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --tcpaddr /ip4/0.0.0.0/tcp/9162 +``` + +Here we *do not* specify `--peers` as we will manually define a replicator after startup via the `rpc` client command. + +In another terminal, add the same schema to *nodeB*: + +```shell +defradb client schema add --url localhost:9182 ' + type Article { + content: String + published: Boolean + } +' +``` + +Set *nodeA* to actively replicate the "Article" collection to *nodeB*: + +```shell +defradb client rpc addreplicator "Article" /ip4/0.0.0.0/tcp/9172/p2p/ +defradb client rpc replicator set -c "Article" /ip4/0.0.0.0/tcp/9172/p2p/ + +``` + +As we add or update documents in the "Article" collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub. 
+
+
+## Securing the HTTP API with TLS
+
+By default, DefraDB will expose its HTTP API at `http://localhost:9181/api/v0`. It's also possible to configure the API to use TLS with self-signed certificates or Let's Encrypt.
+
+To start defradb with self-signed certificates placed under `~/.defradb/certs/` with `server.key`
+being the private key and `server.crt` being the certificate, just do:
+```shell
+defradb start --tls
+```
+
+The keys can be generated with your generator of choice or with `make tls-certs`.
+
+Since the keys should be stored within the DefraDB data and configuration directory, the recommended key generation command is `make tls-certs path="~/.defradb/certs"`.
+
+If not saved under `~/.defradb/certs` then the certificate (`pubkeypath`) and private key (`privkeypath`) paths need to be explicitly defined in addition to the `--tls` flag or `tls` set to `true` in the config.
+
+Then to start the server with TLS, using your generated keys in a custom path:
+```shell
+defradb start --tls --pubkeypath ~/path-to-cert.crt --privkeypath ~/path-to-privkey.key
+
+```
+
+DefraDB also comes with automatic HTTPS for deployments on the public web. To enable HTTPS,
+ deploy DefraDB to a server with both port 80 and port 443 open. With your domain's DNS A record
+ pointed to the IP of your server, you can run the database using the following command:
+```shell
+sudo defradb start --tls --url=your-domain.net --email=email@example.com
+```
+Note: `sudo` is needed above for the redirection server (to bind port 80).
+
+A valid email address is necessary for the creation of the certificate, and is important to get notifications from the Certificate Authority - in case the certificate is about to expire, etc.
+
+
+## Conclusion
+
+This gets you started to use DefraDB! Read more on the documentation website for guides and further information. 
diff --git a/docs/website/guides/_category_.json b/docs/website/guides/_category_.json new file mode 100644 index 0000000000..7494f247dd --- /dev/null +++ b/docs/website/guides/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Guides", + "position": 2 + } + \ No newline at end of file diff --git a/docs/website/guides/akash-deployment.md b/docs/website/guides/akash-deployment.md new file mode 100644 index 0000000000..1156347999 --- /dev/null +++ b/docs/website/guides/akash-deployment.md @@ -0,0 +1,145 @@ +--- +sidebar_label: Akash Deployment Guide +sidebar_position: 60 +--- +# Deploy DefraDB on Akash + +## Overview + +This guide will walk you through the required steps to deploy DefraDB on Akash. + +## Prerequisites + +Before you get started you will need an Akash account with at least 5 AKT. If don't have an Akash account you can create one by installing [Keplr](https://www.keplr.app/). + +## Deploy + +![Cloudmos console](/img/akash/deploy.png "Cloudmos console") + +Deploying on Akash can be done through the [Cloudmos console](https://deploy.cloudmos.io/new-deployment). Click on the "Empty" deployment type and copy the config below into the editor. + +```yaml +--- +version: "2.0" + +services: + defradb: + image: sourcenetwork/defradb:develop + args: + - start + - --url=0.0.0.0:9181 + expose: + - port: 9171 + as: 9171 + to: + - global: true + - port: 9181 + as: 80 + to: + - global: true + +profiles: + compute: + defradb: + resources: + cpu: + units: 1.0 + memory: + size: 1Gi + storage: + size: 1Gi + placement: + akash: + attributes: + host: akash + signedBy: + anyOf: + - "akash1365yvmc4s7awdyj3n2sav7xfx76adc6dnmlx63" + - "akash18qa2a2ltfyvkyj0ggj3hkvuj6twzyumuaru9s4" + pricing: + defradb: + denom: uakt + amount: 10000 + +deployment: + defradb: + akash: + profile: defradb + count: 1 +``` + +Next click the "Create Deployment" button. A pop-up will appear asking you to confirm the configuration transaction. + +After confirming you will be prompted to select a provider. 
Select a provider with a price and location that makes sense for your use case. + +A final pop-up will appear asking you to confirm the deployment transaction. If the deployment is successful you should now see deployment info similar to the image below. + +## Deployment Info + +![Cloudmos deployment](/img/akash/info.png "Cloudmos deployment") + +To configure and interact with your DefraDB node, you will need the P2P and API addresses. They can be found at the labeled locations in the image above. + +## P2P Replication + +To replicate documents from a local DefraDB instance to your Akash deployment you will need to create a shared schema on both nodes. + +Run the commands below to create the shared schema. + +First on the local node: + +```bash +defradb client schema add ' + type User { + name: String + age: Int + } +' +``` + +Then on the Akash node: + +```bash +defradb client schema add --url ' + type User { + name: String + age: Int + } +' +``` + +> The API address can be found in the [deployment info](#deployment-info). + +Next you will need the peer ID of the Akash node. Run the command below to view the node's peer info. + +```bash +defradb client p2p info --url +``` + +If the command is successful, you should see output similar to the text below. + +```json +{ + "ID": "12D3KooWQr7voGBQPTVQrsk76k7sYWRwsAdHRbRjXW39akYomLP3", + "Addrs": [ + "/ip4/0.0.0.0/tcp/9171" + ] +} +``` + +> The address here is the node's p2p bind address. The public p2p address can be found in the [deployment info](#deployment-info). + +Setup the replicator from your local node to the Akash node by running the command below. + +```bash +defradb client p2p replicator set --collection User '{ + "ID": "12D3KooWQr7voGBQPTVQrsk76k7sYWRwsAdHRbRjXW39akYomLP3", + "Addrs": [ + "/dns//" + ] +}' +``` + +> The p2p host and port can be found in the [deployment info](#deployment-info). 
For example: if your p2p address is http://provider.bdl.computer:32582/ the host would be provider.bdl.computer and the port would be 32582. + +The local node should now be replicating all User documents to the Akash node. \ No newline at end of file diff --git a/docs/website/guides/deployment-guide.md b/docs/website/guides/deployment-guide.md new file mode 100644 index 0000000000..82541ed2e9 --- /dev/null +++ b/docs/website/guides/deployment-guide.md @@ -0,0 +1,156 @@ +--- +sidebar_label: Deployment Guide +sidebar_position: 70 +--- +# A Guide to DefraDB Deployment +DefraDB aspires to be a versatile database, supporting both single-node and clustered deployments. In a clustered setup, multiple nodes collaborate seamlessly. This guide walks you through deploying DefraDB, from single-node configurations to cloud and server environments. Let’s begin. + +## Prerequisites +The prerequisites listed in this section should be met before starting the deployment process. + +**Pre-Compiled Binaries** - Each release has its own set of pre-compiled binaries for different Operating Systems. Obtain the pre-compiled binaries for your operating system from the [official releases](https://github.com/sourcenetwork/defradb/releases). + +### Bare Metal Deployment + +For Bare Metal deployments, there are two methods available: + +- ### Building from Source + +Ensure Git, Go and make are installed for all your development environments. + +1. **Unix (Mac and Linux)** - The main thing required is the [Go language toolchain](https://go.dev/dl/), which is supported up to Go 1.20 in DefraDB due to the current dependencies. +2. **Windows** - Install the [MinGW toolchain](https://www.mingw-w64.org/) specific to GCC and add the [Make toolchain](https://www.gnu.org/software/make/). + +Follow these steps to build from source: + +1. Run git clone to download the [DefraDB repository](https://github.com/sourcenetwork/defradb#install) to your local machine. +2. Navigate to the repository using `cd`. 
+3. Execute the Make command to build a local DefraDB setup with default configurations. +4. Set the compiler and build tags for the playground: `GOFLAGS="-tags=playground"` + +#### Build Playground + +Refer to the Playground Basics Guide for detailed instructions. + +1. Compile the playground separately using the command: `make deps:playground` +2. This produces a bundle file in a folder called dist. +3. Set the environment variable using the [NodeJS language toolchain](https://nodejs.org/en/download/current) and npm to build locally on your machine. The JavaScript and Typescript code create an output bundle for the frontend code to work. +4. Build a specific playground version of DefraDB. Use the go flags environment variable, instructing the compiler to include the playground directly embedded in all files. Execute the [go binary embed](https://pkg.go.dev/embed) command, producing a binary of approximately 4MB. + + + +- ### Docker Deployments + +Docker deployments are designed for containerized environments. The main prerequisite is that Docker should be installed on your machine. + + +The steps for Docker deployment are as follows: + +1. Install Docker by referring to the [official Docker documentation](https://docs.docker.com/get-docker/). +2. Navigate to the root of the repository where the Dockerfile is located. +3. Run the following command: +`docker build -t defra -f tools/defradb.containerfile ` + + +**Note**: The period at the end is important and the -f flag specifies the file location. + +The container file is in a subfolder called tools: `path: tools/defradb.containerfile` + +Docker images streamline the deployment process, requiring fewer dependencies. This produces a DefraDB binary file for manual building and one-click deployments, representing the database in binary form as a system. + +## Deployment + +### Manual Deployment + +DefraDB is a single statically built binary with no third-party dependencies. 
Similar to bare metal, it can run on any cloud or machine. Execute the following command to start DefraDB:
+`defradb start --store badger`
+
+
+
+### AWS Environment
+
+For deploying to an AWS environment, note the following:
+
+- Deploy effortlessly with a prebuilt [AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (Amazon Machine Image) featuring DefraDB.
+- Access the image ID or opt for the convenience of the Amazon Marketplace link.
+- Refer to [AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html) for an easy EC2 instance launch with your specified image size.
+- Customize your setup using Packer and Terraform scripts in this directory: `tools/cloud/aws/packer`
+
+ 
+
+### Akash Deployments
+
+For detailed instructions on deploying DefraDB with Akash, refer to the [Akash Deployment Guide](https://nasdf-feat-akash-deploy.docs-source-network.pages.dev/guides/akash-deployment).
+
+ 
+
+## Configurations
+
+- The default root directory on Unix machines is `$HOME/.defradb`. For Windows it is `%USERPROFILE%\.defradb`.
+- Specify the DefraDB folder with this command: `defradb start --rootdir <path>`.
+- The default directory where data is stored is `/data` inside the root directory.
+
+ 
+
+## Storage Engine
+
+The storage engines currently used include:
+
+- File-backed persistent storage powered by the [Badger](https://github.com/dgraph-io/badger) database.
+- [In-Memory Storage](https://github.com/sourcenetwork/defradb/blob/develop/datastore/memory/memory.go) which is B-Tree based and ideal for testing, as it does not use the file system. It is specified with this flag: `--store memory`
+
+ 
+
+## Network and Connectivity
+
+As a P2P database, DefraDB requires two ports for node communication, they include:
+
+ 
+
+1. **API Port**: It powers the HTTP API, handling queries  from the client to the database  and various API commands. The default port number is *9181*.
+
+2. 
**P2P Port**: It facilitates communication between nodes, supporting data sharing, synchronization, and replication. The default port number is *9171*.
+
+ 
+
+The P2P networking functionality can't be disabled entirely, but you can use the `defradb start --no-p2p` command through the config files and CLI to deactivate it.
+
+ 
+
+### Port Customization
+
+The API port can be specified using the [bind address](https://docs.libp2p.io/concepts/fundamentals/addressing/):
+
+`API: --url :`
+
+For P2P, use the `--p2paddr` flag with a multiaddress:
+
+`--p2paddr `
+
+Here is an [infographic](https://images.ctfassets.net/efgoat6bykjh/XQrDLqpkV06rFhT24viJc/1c2c72ddebe609c80fc848bfa9c4771e/multiaddress.png) to further understand multi-address.
+
+
+## The Peer Key
+
+Secure communication between nodes in DefraDB is established with a unique peer key for each node. Key details include:
+
+ 
+
+- The peer key is automatically generated on startup, replacing the key file in a specific path.
+- There is no current method for generating a new key except for overwriting an existing one.
+- The peer key uses a specific elliptic-curve algorithm, called Ed25519, which is used to generate private keys.
+- In-memory mode generates a new key with each startup.
+- The config file located at `/config.yaml` is definable and used for specification.
+- Additional methods for users to generate their own Ed25519 key: `openssl genpkey -algorithm ed25519 -text`
+
+## Future Outlook
+
+As DefraDB evolves, the roadmap includes expanding compatibility with diverse deployment environments:
+
+- **Google Cloud Platform (GCP)**: Tailored deployment solutions for seamless integration with GCP environments.
+- **Kubernetes**: Optimization for Kubernetes deployments, ensuring scalability and flexibility.
+- **Embedded/IoT for Small Environments**: Adaptations to cater to the unique demands of embedded systems and IoT applications. 
+
+- **Web Assembly (WASM) Deployments**: Exploring deployment strategies utilizing Web Assembly for enhanced cross-platform compatibility.
+
+ 
\ No newline at end of file
diff --git a/docs/website/guides/explain-systems.md b/docs/website/guides/explain-systems.md
new file mode 100644
index 0000000000..a1d770fc2e
--- /dev/null
+++ b/docs/website/guides/explain-systems.md
@@ -0,0 +1,171 @@
+---
+sidebar_label: Explain Systems Guide
+sidebar_position: 20
+---
+# A Guide to Explain Systems in DefraDB
+
+## Overview
+
+The DefraDB Explain System is a powerful tool designed to introspect requests, examine plan graphs, and deliver insights into the execution of queries and mutations in DefraDB. These requests can range from basic information queries to highly intricate multi-step operations, all enabled with a single directive added to the request.
+
+### Regular Request
+
+```graphql
+query {
+  Author {
+    _key
+    name
+    age
+  }
+}
+```
+
+### Explain Request
+
+```graphql
+query @explain {
+  Author {
+    _key
+    name
+    age
+  }
+}
+```
+
+As application demand grows and schemas expand, requests often become more complex. This could involve adding a type-join or sorting large data sets, which can significantly increase the workload for the database. This often requires tweaking the query or the schema to ensure requests run as fast as possible. However, without the capability to introspect and understand the request's execution flow, the database will be a black box to developers, limiting their capacity to optimize. This is why DefraDB allows developers to ask for an explanation of the request execution, plan graph, and runtime metrics.
+
+DefraDB provides the option to explain or analyze requests to gain insight into query resolution. Instead of directly requesting data, these queries ask the database to outline the steps it would take to resolve the request and execute all necessary operations before generating the result. 
This provides transparency into potential bottlenecks, such as inefficient scans or redundant sorting operations. Explain requests enable developers to better understand the database's inner workings and clarify the operations required for request resolution. + +Explain requests interact directly with the request planner, executor, and the resulting Plan Graph. + +## Planner and Plan Graph + +The request planner plays a crucial role in DefraDB as it is responsible for executing and presenting request results. When a database receives a request, it converts it into a series of operations planned and implemented by the request planner. These operations are represented as a Plan Graph, which is a directed graph of operations the database must perform to deliver the requested information. + +The Plan Graph is beneficial because it offers a structured request representation, allowing concurrent traversal, branch exploration, and independent subgraph optimization. Each Plan Graph node represents a specific work unit and consists of smaller graphs. For instance, the Plan Graph may contain scan nodes, index nodes, sorting nodes, and filter nodes, among others. The Plan Graph's order and structure are hierarchical, with each node relying on the previous node's output. For example, the final output may depend on the state rendering, which in turn relies on the state limiting, state sorting, and state scanning. + + + +The Plan Graph is a vital component of request processing as it enables the database to simplify complex operations into smaller, more manageable units. In this way, the Plan Graph contributes to the database's performance and scalability enhancement. + +The Explain System and Plan Graph collectively provide structured, accessible insights and transparency into the steps a database takes to execute a request. + +## Benefits + +At its core, the Explain System is a tool that assists developers in optimizing database queries and enhancing performance. 
Here is an example that emphasizes its advantages. + +Quick scans - Most queries begin with a scan node, which is a brute-force method of searching the entire key-value collection. This can be slow for large data sets. However, by using a secondary index, a space-time tradeoff can be made to improve query performance and avoid full scans. + +Use of Secondary Indexes- Determining the performance benefits of adding a secondary index can be challenging. Fortunately, the Explain System offers valuable insights into DefraDB's internal processing and Plan Graph, helping to identify the impact. Most importantly, developers can run a simple Explain request and obtain these insights without actually executing the request or building the index, as it only operates on the plan graph. + +Improved transparency- Submitting an explain request informs developers whether a full table scan or an index scan will be conducted, and which other elements will be involved in the process. This information enables developers to understand the steps required to execute their queries and create more efficient ones. + +Query Optimization- For example, it is more efficient to query from primary to secondary than from secondary to primary. The Explain System can also accurately demonstrate the inefficiency of certain queries, such as a simple point lookup compared to an efficient join index. Overall, the Explain System helps developers gain insight into the inner workings of the database and queries, allowing for greater introspection and understanding. + +## How it works + +When you send a request to the database, it can either execute the request or explain it. By default, the database will execute the request as expected. This will compile the request, construct a plan, and evaluate the nodes of the plan to render the results. + +Conversely, an Explain will compile the request, construct a plan, and finally walk the plan graph, collecting node attributes and execution metrics. 
The goal is to gather details about each part of the plan and show this information to the developer in a clear and organized way. + +Having the plan arranged as parts in a graph is helpful because it's both fast to process and simple to understand. When a request is changed into an Explain request, it creates an organized view of the plan graph that developers can make sense of. Some smaller details might be left out, but the main points and important features give a clear link between the internal and external views of the graph. By gathering the structure and features of the plan graph, developers can learn the steps needed to run their requests and make them work better and faster. + +## Types of Explain Requests + +### Simple Explain + +Simple Explain Requests is the default mode for explanation, only requiring the additional `@explain` directive. You can also be explicit and provide a type argument to the directive like this `@explain(type: simple)`. + +This mode of explanation returns only the syntactic and structural information of the Plan Graph, its nodes, and their attributes. + +The following example shows a Simple Explain request applies to an `Author` query request. + +```graphql +query @explain { + Author { + name + age + } +} +``` + +```json +// Response +{ + "explain": { + "select TopNode": { + "selectNode": { + "filter": null, + "scanNode": { + "filter":null, + "collectionID": "3", + "collectionName": "Author", + "spans": [{ + "start": "/3", + "end": "/4" + }] + } + } + } + } +} +``` + +With the corresponding Plan Graph: + +Simple Explain requests are extremely fast, since it does not actually execute the constructed Plan Graph. It is intended to give transparency back to the developer, and to understand the structure and operations of how the database would resolve their request. + +### Execute Explain + +Execute explanation differs from Simple mode because it actually executes the constructed plan graph from the request. 
However, it doesn't return the results, but instead collects various metrics and runtime information about how the request was executed, and returns it using the same rendered plan graph structure that the Simple Explain does. This is similar to `EXPLAIN ANALYZE` from PostgreSQL or MySQL.
+
+You can create an Execute Explain by specifying the explain type using the directive type argument `@explain(type: execute)`.
+
+The following example shows an Execute Explain request applied to an author query request.
+
+```graphql
+query @explain(type: execute) {
+  Author {
+    name
+    age
+  }
+}
+```
+
+```json
+// Response
+[
+  {
+    "explain": {
+      "executionSuccess": true,
+      "sizeOfResult": 1,
+      "planExecutions": 2,
+      "selectTopNode": {
+        "selectNode": {
+          "iterations": 2,
+          "filterMatches": 1,
+          "scanNode": {
+            "iterations": 2,
+            "docFetches": 2,
+            "filterMatches": 1
+          }
+        }
+      }
+    }
+  }
+]
+```
+
+Because Execute Explain actually executes the plan, it will of course take more time to complete and return results than the Simple Explain. It will actually take slightly longer to execute than the non-explain counterpart, as it has the overhead of measuring and collecting information.
+
+## Limitations
+
+One disadvantage of the Explain System is that it violates the formal specification of the GraphQL API. This means that certain guarantees, such as the symmetry between the structure of the request and result, are not maintained.
+
+For example, if a request is sent to a user collection, the GraphQL Schema specifies that it will return an array of users. However, if the explain directive is added, the structure of the result will not match the schema specified and will instead be the plan graph representation. While this violation is considered acceptable in order to improve the developer experience, it is important to be aware of this limitation.
+
+## Next Steps
+
+A future feature called Prediction Explain aims to provide a balance between speed and information. 
These requests do not execute the plan graph, but instead make educated guesses about the potential impact of the query based on attributes and metrics. Prediction Explain Requests take longer than the Simple Explain System, but not as long as Execution Explain Requests. + +The Explain System is being developed with additional tooling in mind. Currently, it returns a structured JSON object that represents the plan graph. In the future, the aim is for the tool to provide different representations of the Plan Graph, including a text output that is more easily readable by humans and a visual graph that displays the top-down structure of the graph. In addition to the Simple and Execution Explain Requests that the Explain System currently supports or will support in the future, the team is also working on serializing and representing the returned object in various ways. This will provide developers with more options for understanding and analyzing the database and queries. diff --git a/docs/website/guides/merkle-crdt.md b/docs/website/guides/merkle-crdt.md new file mode 100644 index 0000000000..150e35fb6b --- /dev/null +++ b/docs/website/guides/merkle-crdt.md @@ -0,0 +1,67 @@ +--- +sidebar_label: Merkle CRDT Guide +sidebar_position: 30 +--- +# A Guide to Merkle CRDTs in DefraDB + +## Overview +Merkle CRDTs are a type of Conflict-free Replicated Data Type (CRDT). They are designed to update or modify independent sets of data without any human intervention, ensuring that updates made by multiple actors are merged without conflicts. The goal of Merkle CRDT is to perform deterministic, automatic data merging and synchronization without any inconsistencies. CRDTs were first formalized in 2011 and have become a useful tool in distributed computing. Merkle CRDTs are a new kind of CRDT that allows data to be merged without conflicts, ensuring that data is deterministically synchronized across multiple actors. 
This can be useful in a variety of distributed computing applications where data needs to be updated and merged in a consistent and conflict-free manner. + +## Background on Regular CRDTs +Conflict-free Replicated Data Types (CRDTs) are a useful tool in local and offline-first applications. They allow multiple actors or peers to collaborate and update the state of a data structure without worrying about synchronizing that state. CRDTs come in many different forms and can be applied to a variety of data types, such as simple registers, counters, sets, lists, and maps. The key feature of CRDTs is their ability to merge data deterministically, ensuring that all actors eventually reach the same state. + +To achieve this, CRDTs rely on the concept of causality or ordering of events. This determines how the merge algorithm works and ensures that if all events or updates are applied to a data type, the resulting state will be the same for all actors. In distributed systems, however, the concept of time and causality can be more complex than it appears. This is because it is often difficult to determine the relative order of events occurring on different computers in a network. As a result, CRDTs often rely on some sort of clock or a different mechanism for tracking the relative order of events. + +## Need for CRDTs + +It can be difficult to determine the relative order of events occurring on different computers in a network, which is why CRDTs can enable the user to ensure data can be merged without conflicts. For example, consider a situation where two actors, A and B, are making updates to the same data at the same time. If actor A stamps their update with a system time of 2:39:56 PM EST on September 6, 2022, and actor B stamps their update with a system time of 2:40:00 PM, it would look like actor B's update occurred after actor A's. 
However, system times are not always reliable because they can be easily changed by actors, leading to inconsistencies in the relative order of events. To solve this problem, distributed systems use alternative clocks such as logical clocks or vector clocks to track the causality of events. + + +To track the relative causality of events, CRDTs often rely on clocks such as logical clocks or vector clocks. However, these clocks have limitations when used in high-churn networks with a large number of peers. For example, in a peer-to-peer network with a high rate of churn, logical and vector clocks require additional metadata for each peer that an actor interacts with. This metadata must be constantly maintained for each peer, which can be inefficient if the number of peers is unbounded. Additionally, in high churn environments, the amount of metadata grows linearly with the churn rate, making it infeasible to use these clocks in certain situations. Therefore, existing CRDT clock implementations may not be sufficient for use in high churn networks with an unbounded number of peers. + +## Formalization of Merkle CRDT + +Merkle CRDTs are a type of CRDT that combines traditional CRDTs with a new approach to CRDT clocks called a Merkle clock. This clock allows us to solve the issue of maintaining a constant amount of metadata per peer in a high churn network. Instead of tracking this metadata, we can use the inherent causality of Merkle DAGs (Directed Acyclic Graphs). In these graphs, each node is identified using its content identifiable data (CID) and is embedded in another node. The edges in these graphs are directed, meaning one node points to another, forming a DAG structure. If a node points to another node, the CID of the first node is embedded in the value of the second. The inherent nature of Merkle graphs is the embedded relation of hashing or CIDs from one node to another, providing us with useful properties. 
+ + +To create a Merkle CRDT, we take an existing Merkle clock and embed any CRDT that satisfies the requirements. A CRDT is made up of three components: the data type, the CRDT type (operation-based or state-based), and the semantic type. For our specific implementation, we use delta state based CRDTs with different data types and semantic types for different applications. The formal structure of a CRDT is simple - it consists of a Merkle CRDT outer box containing two inner boxes, a Merkle clock and a regular CRDT. + + + +## Merkle Clock + +Merkle clocks are a type of clock used in distributed systems to solve the issue of tracking metadata for each peer that an actor interacts with. They are based on Merkle DAGs that function like hash chains, similar to a blockchain. These graphs are made up of nodes and edges, where the edges are directed, meaning that one node points to another. The head of a Merkle DAG is the most recent node added to the graph, and the entire graph can be referred to by the CID of the head node. The size of the CID hash does not grow with the number of nodes in the graph, making it a useful tool for high churn networks with a large number of peers. + +The Merkle clock is created by adding an additional metadata field to each node of the Merkle DAG, called the height value, which acts as an incremental counter that increases with each new node added to the system. This allows the Merkle clock to provide a rough sense of causality, meaning that it can determine if one event happened before, at the same time, or after another event. The inherent causality of the Merkle DAG ensures that events are recorded in the correct order, making it a useful tool for tracking changes in a distributed system. 
+ +The embedding of CID into the parent node that produces the hash chain provides a causality guarantee that, for example a B is pointed to by node A, node C is pointed to by node B and so on till the node Z, A had to exist before B, because the value of A is embedded inside B, and B could not exist before A, otherwise it would result in breaking the causality of time because the value of A is embedded inside the value of B which then gets embedded inside the value of C, which means that C has to come after B and so on, all the way till the user gets back to Z. And hence if the user has constructed a Merkle DAG correctly, then A has to happen before B, B has to happen before C, C has to happen before D, all the way until they get to Z. This inherent causality of time with respect to CIDs and Merkle DAG provides the user with a causality-adhering system. + +## Delta State Semantics + +There are two types of Delta State Semantics: Operation-Based CRDTs and State-Based CRDTs. Operation-Based CRDTs use the intent of an operation as the body or content of the message, while State-Based CRDTs use the resulting state as the body or content of the message. Both have their own advantages and disadvantages, and the appropriate choice depends on the specific use case. Operation-Based CRDTs express actions such as setting a value to 10 or incrementing a counter by 4 through the intent of the operation. State-Based CRDTs, on the other hand, include the resulting state in the message. For example, a message to set a value to 10 would include the value 10 as the body or content of the message. + +Operation-Based CRDTs tend to be smaller because their messages only contain the operation being performed, while State-Based CRDTs are larger because their messages contain both the current state and the state being changed. It is important to consider the trade-offs between these two types of Delta State Semantics when choosing which one to use in a given situation. 
+ +Delta State Semantics is an optimization of the State-based CRDTs. While both Operation-based CRDTs and State-based CRDTs have their own pros and cons, Delta State CRDTs offer a hybrid approach that uses the state as the message content, but with the same size as an operation. + +In a Delta State CRDT, the message body includes only the minimum amount, or "delta," necessary to transform the previous state to the target state. For example, if we have a set of nine fruit names, and we want to add a banana to the set, the Delta State CRDT would only include the delta, or the value "banana," rather than expressing the entire set of 10 fruit names as in traditional State-based CRDTs. This is like an operation because it has the size of only one action, but it expresses the difference in state between the previous and target rather than the intent of the action. + + +## Branching and Merging State + + +### Branching of Merkle CRDTs + + +Merkle CRDTs are based on the concept of a Merkle clock, which is in turn based on the idea of a Merkle DAG. The structure of a Merkle DAG allows it to branch and merge at any point, as long as it adheres to the requirement of being a DAG and does not create a recursive loop. + + +Branching in a Merkle CRDT system occurs when two peers make independent changes to a common ancestor node and then share those changes, resulting in two distinct states. Neither of these states is considered the correct or canonical version in a Merkle CRDT system. Instead, both are treated as their own local main copies. From these divergent states, further updates can be made, causing the divergence to increase. For example, if there are 10 nodes in common between the two states, one branch may have five new nodes while the other has six. These branches exist independently of each other, and changes can be made to each branch independently without the need for immediate synchronization. 
This makes CRDTs useful for local-first or offline-first applications that can operate without network connectivity. The structure of a Merkle DAG, on which a Merkle CRDT is based, naturally supports branching. + +### Merging of Merkle CRDTs + +Merging in a Merkle CRDT system involves bringing two divergent states back together into a single, canonical graph. This is done by adding a new head node, known as a merge node, to the history of the graph. The merge node has two or more previous parents, as opposed to the traditional single parent of most nodes. To merge these states, merge semantics must be applied to the new system. The Merkle clock provides two pieces of information that facilitate this process: the use of a CID for each parent and the ability to go back in time through both branches of the divergent state, parent by parent, before officially merging the state. Each type of CRDT defines its own merge semantics. + + +The process begins by finding a common ancestral node between the two divergent states. Each node in the system includes a height parameter, which is the number of nodes preceding it. This, along with the CID of the ancestral node, is provided to the embedded CRDT's merge system to facilitate the merging process. The Merkle CRDT coordinates the logistics of the Merkle DAG and passes information about the multiple parents of the merge node to the embedded CRDT's merge system, which is responsible for defining the merge semantics. As long as the CRDT and the Merkle DAG are functioning correctly, the resulting Merkle clock will also operate correctly. 
+ diff --git a/docs/website/guides/peer-to-peer.md b/docs/website/guides/peer-to-peer.md new file mode 100644 index 0000000000..5b6a52b42c --- /dev/null +++ b/docs/website/guides/peer-to-peer.md @@ -0,0 +1,144 @@ +--- +sidebar_label: Peer-to-Peer Guide +sidebar_position: 10 +--- +# A Guide to Peer-to-Peer Networking in DefraDB + +## Overview + +P2P networking is a way for devices to communicate and share data directly with each other without the need for a central server. In a P2P network, all devices, also known as peers, are equal and can both send and receive data. DefraDB is a database that uses P2P networking instead of the traditional client-server model. + +One advantage of this is that it allows for the development of offline-first or local-first applications. These are apps that can still work even when there is no internet connection and can sync data between multiple devices without the need for a central server to facilitate the synchronization. This makes it possible for a peer-to-peer network and database like DefraDB to function in a trustless environment, where no one device is more important or trustworthy than any other. This aligns with the goals of a decentralized, private, and user-centric database. + +P2P networking is the primary method of communication used in DefraDB, a decentralized database. The libp2p library was developed specifically for this purpose and forms the technological foundation of the database. In DefraDB, documents are replicated and combined into an update graph, similar to a version control client like Git or a hash chain or a hash graph. P2P networking allows nodes in DefraDB to communicate directly with each other, without the need for an intermediate node, making it easier to synchronize the updates within the update graph of a document. + +Libp2p is a decentralized network framework that enables the development of P2P applications. 
It consists of a set of protocols, specifications, and libraries created by Protocol Labs for the IPFS project. As the network layer for IPFS, libp2p provides various features for P2P communication such as transport, security, peer routing, and content discovery. + +Libp2p is modular, meaning it can be customized and integrated into different P2P projects and applications. It is designed to work with the IPLD (InterPlanetary Linked Data) data model, which is a suite of technologies for representing and navigating hash-linked data. IPLD allows for the unification of all data models that link data with hashes as instances of IPLD, making it a suitable choice for use with libp2p in P2P networking. + +## Documents and Collections + +The high-level distinction between a document and a collection is as follows: + +* A document is a single record that contains multiple fields. These documents are bound by schema. For example, each row in an SQL table has multiple individual columns. These rows are analogous to documents with multiple individual fields. + +* A collection refers to a collection of documents under a single schema. For example, a table from an SQL database comprising rows and columns is analogous to collections. + +## Need for P2P Networking in DefraDB + +The DefraDB database requires peer-to-peer (P2P) networking to facilitate data synchronization between nodes. This is necessary because DefraDB can store documents and individual IPLD blocks on various nodes around the world, which may be used by a single application or multiple applications. P2P networking allows local instances of DefraDB, whether on a single device or in a web browser, to replicate information with other devices owned by the user or with trusted third parties. These third parties may serve as historical archival nodes or may be other users with whom the user is collaborating. 
For example, if a collaborative document powered by DefraDB is being shared with others, it should be transmitted over a P2P network to avoid the need for a trusted intermediary node. DefraDB offers two types of replication over the P2P network: + +* Passive replication + +* Active replication + +## How it works + +There are two concrete types of data replication within DefraDB, i.e., active and passive replication. Both these replication types serve different use cases and are implemented using different mechanics. + +### Passive Replication + +In DefraDB, passive replication is a type of data replication in which updates are automatically broadcast to the network and its peers without explicit coordination. This occurs over a global publish-subscribe network (PubSub), which is a way to broadcast updates on a specific topic and receive updates on that topic. + +This is called passive replication because it is similar to a "fire and forget" scenario. Passive replication is enabled for all nodes by default and all nodes will always publish to the larger PubSub network. Passive replication can be compared to the connectionless protocol UDP, while active replication can be compared to the connection-oriented protocol TCP. + +### Active Replication + +In active replication, data is replicated between nodes in a direct, point-to-point manner. This means that a specific node is chosen to constantly receive updates from the local node. In contrast, passive replication uses the Gossip protocol, which is a peer-to-peer communication mechanism in which nodes exchange state information about themselves and other nodes they know about. In the Gossip protocol, each node initiates a gossip round every second to exchange information with another random node, and the process is repeated until the whole system is synchronized. 
One difference between active and passive replication is that the Gossip protocol is a multi-hop protocol, meaning that there may be multiple connections between nodes in the network. Active replication, on the other hand, creates a direct connection between two nodes and ensures that updates are actively pushed to the other node, which then acknowledges receipt of the update to establish two-way communication. + +Passive replication is a good choice for situations where you want your peers to be able to follow your updates without requiring much coordination from you. It is often used in collaborative environments where multiple people are working on a document and want to ensure that both peers are in sync with each other. On the other hand, active replication is better for situations where you have a specific peer you are collaborating with and want to ensure that all of your data is being replicated to an archival node. This is because active replication involves a direct, point-to-point connection between the two nodes, allowing for more efficient and reliable data replication. + +## Implementation of Peer-to-Peer Networking in DefraDB + +In the DefraDB software architecture, a PubSub system is used for peer-to-peer networking. In this system, publishers send messages without specifying specific receivers, and subscribers express interest in certain types of messages without knowing which publishers they come from. This allows for a more dynamic network topology and better scalability. In the DefraDB PubSub network, nodes can publish or subscribe to specific topics. When a node publishes a message in passive replication, it is broadcasted to all nodes in the network. These nodes then coordinate with each other, re-broadcast the message, and use a process called "gossiping" to spread the published information through multiple connections, or "hops." This is known as the Gossip protocol. 
+ +In passive replication, updates are broadcasted on a per-document level over the global PubSub network. Each document has its own topic, and nodes can subscribe to the topic corresponding to a specific document to receive updates passively. This is useful in environments where certain documents are in high demand or are being frequently updated, as the connections to these "hot documents" can be kept open to ensure they are kept up-to-date. However, if a document has not been accessed in a while, it is less important for it to be constantly updated and it is easy to resync these "cold documents" by submitting a query for the relevant updates. Passive replication and the PubSub system are therefore focused on individual documents. + +One major difference between active and passive networks is that an active network can focus on both collections and individual documents, while a passive network is only focused on individual documents. Active networks operate over a direct, point-to-point connection and allow you to select an entire collection to replicate to another node. For example, if you have a collection of books and specify a target node for active replication, the entire collection will be replicated to that node, including any updates to individual books. However, it is also possible to replicate granularly by selecting specific books within the collection for replication. Passive networks, on the other hand, are only concerned with replicating individual documents. + +```bash +$ defradb client rpc addreplicator "Books" /ip4/0.0.0.0/tcp/9172/p2p/ +``` + +## Concrete Features of P2P in DefraDB + +### Passive Replication Features + +The Defra Command Line Interface (CLI) allows you to modify the behavior of the peer-to-peer data network. When a DefraDB node starts up, it is assigned a libp2p host by default. + +```bash +$ defradb start +... 
+2023-03-20T07:18:17.276-0400, INFO, defra.cli, Starting P2P node, {"P2P address": "/ip4/0.0.0.0/tcp/9171"} +2023-03-20T07:18:17.281-0400, INFO, defra.node, Created LibP2P host, {"PeerId": "12D3KooWEFCQ1iGMobsmNTPXb758kJkFc7XieQyGKpsuMxeDktz4", "Address": ["/ip4/0.0.0.0/tcp/9171"]} +``` + +This host has a Peer ID, which is a function of a secret private key generated when the node is started for the first time. The Peer ID is important to know as it may be relevant for different parts of the peer-to-peer networking system. The libp2p networking stack can be enabled or disabled. + +```bash +$ defradb start --no-p2p +``` + +The passive networking system can also be enabled or disabled. By default, if the P2P network is online, the passive networking system is turned on. + +```bash +$ defradb start --peers /ip4/0.0.0.0/tcp/9171/p2p/ +``` + +A node automatically listens on multiple addresses or ports when the P2P module is instantiated. These are referred to as the peer-to-peer address, which is expressed as a multi-address. A multi-address is a string that represents a network address and includes information about the transport protocol and addresses for multiple layers of the network stack. + + +```bash +/ip4/0.0.0.0/tcp/9171/p2p/ + +scheme/ip_address/protocol/port/protocol/peer_id +``` +The peer listens in on the p2p port 9171​ by default, which can be customized through the CLI or the configuration file. + +```bash +$ defradb start --p2paddr /ip4/0.0.0.0/tcp/9172 +``` + +The peer-to-peer address is the first of the addresses that the peer listens in on. + +At the start of a node, flags can be specified to enable, disable, or switch the host that the peer is listening on. When a new node is started, every existing or new document goes through an LRU (Least Recently Used) cache to identify the most important, relevant, or frequently used documents over a specific period of time. 
Then, by default, the passive replication system automatically subscribes to and creates the corresponding document topics on the PubSub network. + +When a node is started, it specifies a list of peers that it wants to stay connected to. The peer-to-peer node is self-organizing, meaning that if a node joins a new topic, it asks the larger network for other peers that are sharing information on that topic. This ensures that the node is always connected to some relevant nodes. A node also tries to find other relevant nodes, particularly when an individual topic is joined, subscribed to, or published. + +### Active Replication Features + +To use the active replication feature in DefraDB, you can submit an add replicator Remote Procedure Call (RPC) command through the client API. You will need to specify the multi-address and Peer ID of the peer that you want to include in the replicator set, as well as the name of the collection that you want to replicate to that peer. These steps handle the process of defining which peers you want to connect to, enabling or disabling the underlying subsystems, and sending additional RPC commands to add any necessary replicators. + +```bash +$ defradb client rpc addreplicator "Books" /ip4/0.0.0.0/tcp/9172/p2p/ +``` + +## Benefits of the P2P System + +One of the main benefits of the peer-to-peer (P2P) system is its robustness and ability to work even in the event of network failures. This allows developers to create local-first, offline-first applications. If a developer's node loses its internet connection, the P2P system will continue making changes and queue up updates. When the system is back online and reconnects to the network, it will automatically resolve the updates and resume publishing or replicating to the nodes specified by the developer. This means that the developer can rely on a trustless mechanism and does not need to rely on a central, trusted peer for data replication or repositories to save data. 
Instead, data is directly passed from the developer's node to any other collaborating node. This global P2P network allows developers to collaborate with anyone across the internet with no fundamental limitations. Additionally, since the P2P system is built on top of libp2p, developers have access to other useful features as well. These factors make it highly advantageous to work with a P2P network, especially from a local-first perspective. + +In DefraDB, the peer-to-peer system has several benefits. It is easy to connect to a server in a data center because each server has its own individual IP address. However, in a home network, there is a single IP for the modem and multiple devices connected to it are protected by a NAT firewall, making it difficult for other nodes to connect directly. The libp2p framework offers two solutions to this problem: + +Circuit Relays - These allow you to specify a third-party node that acts as an intermediary to resolve the NAT firewall issue. This works when you connect to the firewall/circuit relay node, which is a publicly accessible node, and another node connects to it as well. The third-party node acts as a conduit in this situation. This process requires trust in the third-party node to properly relay information, but it operates over encrypted transport layers, so the third-party node cannot use man-in-the-middle attacks to listen in on the data exchange. However, it does require the third-party node to be online and accessible. + +NAT Hole Punching - This is a technique that allows nodes to connect directly to a device behind a NAT firewall. This ensures that a user can directly connect with another node and vice versa, without the need for a trusted intermediary within the peer-to-peer network. 
+ +## Current Limitations and Future Outlook + +Here are some of the limitations of the P2P system: + +One limitation of the peer-to-peer system is the potential scalability issue with having every document have its own independent topic. This can lead to overhead if a user has thousands or tens of thousands of documents in their node, or if an application developer has hundreds of thousands or millions of documents in their node. To address this issue, the team is exploring ways to create aggregate topics that can be scoped to subnets. These subnets can be group-specific or application-specific. Multiple hops are required between subnets. This means that if a user wants to synchronize and broadcast updates from their subnet to another subnet, they have to go from their subnet to the global net and back to the other subnet. The team is exploring ways to navigate this limitation through multi-hop mechanisms. + +In a peer-to-peer network, when a user broadcasts an update, it is sent to other nodes on the network. However, if a node is offline or experiences some other issue, it may miss some updates. In DefraDB's passive replication mode, the most recent update is broadcasted through the network using a Merkle DAG (directed acyclic graph). The broadcasting node does not verify that the receiving node has received all previous updates, so it is the responsibility of the receiving node to ensure it has received all necessary updates. If a node misses a couple of updates and then receives a new update, it must synchronize all previous updates before considering the document up to date. This is because the internal data model of the document is based on all changes made over time, not just the most recent change. When broadcasting the most recent update, it is sent over the peer-to-peer PubSub network. 
However, if a node needs to go back in time through the Merkle DAG to get updates from previous broadcasts, it uses a different system called the Distributed Hash Table (DHT). + +The scalability of Bitswap and the Distributed Hash Table (DHT) has been identified as a limitation in the peer-to-peer (P2P) system. To address these issues, we are exploring the use of two new protocols: + +PubSub based query system - This allows users to query and receive updates through the global PubSub network using query topics that are independent of document topics. + +Graph Sync - This is a protocol developed by Protocol Labs, which has the potential to resolve issues with the Bitswap algorithm and DHT. These two approaches show promise in improving the scalability of the P2P system. + +There are currently some limitations with the peer-to-peer system being used. One issue is that replicators, which are added to a node, do not persist through updates or restarts. This means that the user must re-add the replicators every time the node is restarted. However, this issue will be resolved in the next version of the system. + +Currently, when a replicator is added to a node, it doesn't persist between node updates or node restarts. This means that every time there is a restart, the user must re-add these replicators. This is a minor oversight that the Source team plans to fix in a future release. In the meantime, they are also working on a new protocol called Head Exchange to address issues with syncing the Merkle DAG when updates have been missed or concurrent, diverged updates have been made. The Head Exchange protocol aims to efficiently establish the most recent update seen by each node, determine if there are any divergent updates, and figure out the most efficient way to synchronize the nodes with the least amount of communication. 
+ +One issue with peer-to-peer local-first development is that it can be difficult for nodes to connect with each other when they are running on devices within the same home Wi-Fi network. This is due to a NAT firewall, which is a router that operates to protect private networks. A NAT firewall only allows internet traffic to pass through if it was requested by a device on the private network. It protects the identity of a network by not exposing internal IP addresses to the internet. This can make it difficult for other nodes to connect directly to a node running behind a NAT firewall. diff --git a/docs/website/guides/schema-migration.md b/docs/website/guides/schema-migration.md new file mode 100644 index 0000000000..9bdd854a18 --- /dev/null +++ b/docs/website/guides/schema-migration.md @@ -0,0 +1,286 @@ +--- +sidebar_label: Schema Migration Guide +sidebar_position: 60 +--- +# A Guide to Schema Migration in DefraDB + +## Overview +In a database system, an application’s requirements can change at any given time, to meet this change, Schema migrations are necessary. This is where Lens comes in, as a migration engine that produces effective schema migration. + +This guide will provide an understanding of schema migrations, focusing on the Lens migration engine. Let’s dive in! + +Lens is a pipeline for user-defined transformations. It enables users to write their transformations in any programming language and run them through the Lens pipeline, which transforms the cached representation of the data. + +## Goals of the Lens Migration System + +Here are some of the goals of the Lens schema migration system: + +- **Presenting a consistent view of data across nodes**: The Lens schema migration system can present data across nodes consistently, regardless of the schema version being used. 
+ +- **Verifiability of data**: Schema migration in the Lens migration system is presented as data, this preserves the user-defined mutations without corrupting system-defined mutations and also allows migrating from one schema version to another. + +- **A language-agnostic way of writing schema migrations**: Schema migrations can be written in any programming language and executed properly as Lens is language-agnostic. + +- **Safe usage of migrations by others through a sandbox**: Migrations written in Lens are run in a sandbox, which ensures safety and eliminates the concern for remote code executions (RCE). + +- **Peer-to-peer sync of schema migrations**: Lens allows peers to write their migrations in different application versions and sync without worrying about the versions other peers are using. + +- **Local autonomy of schema migrations**: Lens enables local autonomy in writing schema migrations by giving users control of the schema version they choose to use. The users can stay in a particular schema version and still communicate with peers on different versions, as Lens is not restricted to a particular schema version. + +- **Reproducibility and deterministic nature of executing migrations**: When using the Lens migration system, changes to schemas can be written, tagged and shared with other peers regardless of their infrastructure and requirements for deployments. + + +## Mechanism + +In this section, we’ll look at the mechanism behind the Lens migration system and explain how it works. + +Lens migration system functions as a bi-directional transformation engine, enabling the migration of data documents in both forward and reverse directions. It allows for the transformation of documents from schema X to Y in the forward direction and Y to X in the reverse direction. + +The above process is done foundationally, through a verifiable system powered by WebAssembly (Wasm). Wasm also enables the sandbox safety and language-agnostic feature of Lens. 
+ +Internally, schema migrations are evaluated lazily. This avoids the upfront cost of doing a massive migration at once. + +*Lazy evaluation is a technique in programming where an expression is only evaluated when its value is needed.* + +Adopting lazy evaluation in the migration system also allows rapid toggling between schema versions and representations. + +## Usage + +The Lens migration system addresses critical use cases related to schema migrations in peer-to-peer, eventually consistent databases. These use cases include: + +  + +- **Safe Schema Progression**: Ensuring the seamless progression of database schemas is vital for accommodating changing application requirements. Lens facilitates the modification, upgrade, or reversion of schemas while upholding data integrity. + +- **Handling Peer-to-Peer Complexity**: In environments where different clients operate on varying application and database versions, Lens offers a solution to address the complexity of schema migrations. It ensures coherence and effectiveness across different networks. + +- **Language-Agnostic Flexibility**: Functions in Lens are designed to be language-agnostic, offering the versatility to define schema changes in the preferred programming language. This adaptability makes Lens suitable for diverse development environments and preferences. + +- **Lazy Evaluation**: Lens employs a lazy evaluation mechanism, initiating migrations without immediate execution. Schema changes are applied only when documents are read, queried, or updated. This approach reduces the upfront cost of extensive schema migrations while maintaining data consistency. + +- **On-Demand Schema Selection**: Lens supports on-demand schema selection during data queries. Users can specify the schema version they wish to work with, facilitating A/B testing and the seamless transition between different schema versions. 
+ + + +These use cases highlight how Lens empowers users to manage schema migrations effectively, ensuring data consistency and adaptability in evolving database systems. + + +## Example + +In this example we will define a collection using a schema with an `emailAddress` field. We will then patch the schema to add a new field `email`, then define a bi-directional Lens to migrate data to/from the new field. + +**Step One**, define the `Users` collection/schema: + +```graphql +defradb client schema add ' + type Users { + emailAddress: String + } +' +``` + +**Step Two**, patch the `Users` schema, adding the new field, here we pass in `--set-active=true` to automatically apply the schema change to the `Users` collection: + +```graphql +defradb client schema patch ' + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] +' --set-active=true +``` + +**Step Three**, fetch the schema ids so that we can later tell Defra which schema versions we wish to migrate to/from: + +```graphql +defradb client schema describe --name="Users" +``` + +**Step Four**, in order to define our Lens module - we need to define 4 functions: + +- `next() unsignedInteger8`, this is a host function imported to the module - calling it will return a pointer to a byte array that will either contain + an error, an EndOfStream identifier (indicating that there are no more source values), or a pointer to the start of a json byte array containing the Defra document to migrate. It is typically called from within the `transform` and `inverse` functions, and can be called multiple times within them if desired. + + - `alloc(size: unsignedInteger64) unsignedInteger8`​, this is required by all lens modules regardless of language or content - this function should allocate a block of memory of the given `size` , it is used by the Lens engine to pass stuff in to the wasm instance. The memory needs to remain reserved until the next wasm call, e.g. 
until `transform` or `set_param` has been called. Its implementation will be different depending on which language you are working with, but it should not need to differ between modules of the same language. The Rust SDK contains an alloc function that you can call. + +- `set_param(ptr: unsignedInteger8) unsignedInteger8`, this function is only required by modules that accept a set of parameters. As an input parameter it receives a single pointer that will point to the start of a json byte array containing the parameters defined in the configuration file. It returns a pointer to either nil, or an error message. It will be called once, when the migration is defined in Defra (and on restart of the database). How it is implemented is up to you. + +- `transform() unsignedInteger8`, this function is required by all Lens modules - it is the migration, and within this function you should define what the migration should do, in this example it will copy the data from the `emailAddress` field into the `email` field. Lens Modules can call the `next` function zero to many times to draw documents from the Defra datastore, however modules used in schema migrations should currently limit this to a single call per `transform` call (Lens based views may call it more or less frequently in order to filter or create documents). + +- `inverse() unsignedInteger8`, this function is optional, you only need to define it if you wish to define the inverse migration. It follows the same pattern as the `transform` function, only you should implement it to do the reverse. In this example we want this to copy the value from the `email` field into the `emailAddress` field. 
+
+Here is what our migration would look like if we were to write it in Rust:
+
+```rust
+#[link(wasm_import_module = "lens")]
+extern "C" {
+    fn next() -> *mut u8;
+}
+
+#[derive(Deserialize, Clone)]
+pub struct Parameters {
+    pub src: String,
+    pub dst: String,
+}
+
+static PARAMETERS: RwLock<Option<Parameters>> = RwLock::new(None);
+
+#[no_mangle]
+pub extern fn alloc(size: usize) -> *mut u8 {
+    lens_sdk::alloc(size)
+}
+
+#[no_mangle]
+pub extern fn set_param(ptr: *mut u8) -> *mut u8 {
+    match try_set_param(ptr) {
+        Ok(_) => lens_sdk::nil_ptr(),
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_set_param(ptr: *mut u8) -> Result<(), Box<dyn Error>> {
+    let parameter = lens_sdk::try_from_mem::<Parameters>(ptr)?;
+
+    let mut dst = PARAMETERS.write()?;
+    *dst = Some(parameter);
+    Ok(())
+}
+
+#[no_mangle]
+pub extern fn transform() -> *mut u8 {
+    match try_transform() {
+        Ok(o) => match o {
+            Some(result_json) => lens_sdk::to_mem(lens_sdk::JSON_TYPE_ID, &result_json),
+            None => lens_sdk::nil_ptr(),
+            EndOfStream => lens_sdk::to_mem(lens_sdk::EOS_TYPE_ID, &[]),
+        },
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_transform() -> Result<StreamOption<Vec<u8>>, Box<dyn Error>> {
+    let ptr = unsafe { next() };
+    let mut input = match lens_sdk::try_from_mem::<StreamOption<HashMap<String, Value>>>(ptr)? {
+        Some(v) => v,
+        // Implementations of `transform` are free to handle nil however they like. In this
+        // implementation we chose to return nil given a nil input.
+        None => return Ok(None),
+        EndOfStream => return Ok(EndOfStream)
+    };
+
+    let params = PARAMETERS.read()?;
+
+    let value = input.get_mut(&params.src)
+        .ok_or(ModuleError::PropertyNotFoundError{requested: params.src.clone()})?
+        .clone();
+
+    let mut result = input.clone();
+    result.insert(params.dst, value);
+
+    let result_json = serde_json::to_vec(&result)?;
+    lens_sdk::free_transport_buffer(ptr)?;
+    Ok(Some(result_json))
+}
+
+#[no_mangle]
+pub extern fn inverse() -> *mut u8 {
+    match try_inverse() {
+        Ok(o) => match o {
+            Some(result_json) => lens_sdk::to_mem(lens_sdk::JSON_TYPE_ID, &result_json),
+            None => lens_sdk::nil_ptr(),
+            EndOfStream => lens_sdk::to_mem(lens_sdk::EOS_TYPE_ID, &[]),
+        },
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_inverse() -> Result<StreamOption<Vec<u8>>, Box<dyn Error>> {
+    let ptr = unsafe { next() };
+    let mut input = match lens_sdk::try_from_mem::<StreamOption<HashMap<String, Value>>>(ptr)? {
+        Some(v) => v,
+        // Implementations of `transform` are free to handle nil however they like. In this
+        // implementation we chose to return nil given a nil input.
+        None => return Ok(None),
+        EndOfStream => return Ok(EndOfStream)
+    };
+
+    let params = PARAMETERS.read()?;
+
+    // Note: In this example `inverse` is exactly the same as `transform`, only the usage
+    // of `params.dst` and `params.src` is reversed.
+    let value = input.get_mut(&params.dst)
+        .ok_or(ModuleError::PropertyNotFoundError{requested: params.dst.clone()})?
+        .clone();
+
+    let mut result = input.clone();
+    result.insert(params.src, value);
+
+    let result_json = serde_json::to_vec(&result)?;
+    lens_sdk::free_transport_buffer(ptr)?;
+    Ok(Some(result_json))
+}
+```
+
+
+
+More fully coded example modules, including an AssemblyScript example can be found in our integration tests here: https://github.com/sourcenetwork/defradb/tree/develop/tests/lenses
+
+and here: https://github.com/lens-vm/lens/tree/main/tests/modules
+
+We should then compile it to wasm, and copy the resultant `.wasm` file to a location that the Defra node has access to. Make sure that the file is safe there, at the moment Defra will not copy it and will refer back to that location on database restart.
+
+**Step Five**, now that we have updated the collection, and defined our migration, we need to tell Defra to use it, by providing it the source and destination schema IDs from our earlier `defradb client schema describe` call, and a configuration file defining the parameters we wish to pass it:
+
+```graphql
+defradb client schema migration set <source-schema-version-id> <destination-schema-version-id> '
+  {
+    "lenses": [
+      {
+        "path": "<path-to-lens-module.wasm>",
+        "arguments": {
+          "src": "emailAddress",
+          "dst": "email"
+        }
+      }
+    ]
+  }
+'
+```
+
+
+Now the migration has been configured! Any documents committed under the original schema version will now be returned as if they were committed using the newer schema version.
+
+As we have defined an inverse migration, we can give this migration to other nodes in our peer network still on the original schema version, and they will be able to query our documents committed using the new schema version applying the inverse.
+
+We can also change our active schema version on this node back to the original to see the inverse in action:
+
+```graphql
+defradb client schema set-active <schema-version-id>
+```
+
+Now when we query Defra, any documents committed after the schema update will be rendered as if they were committed on the original schema version, with `email` field values being copied to the `emailAddress` field at query time.
+
+## Advantages
+
+Here are some advantages of Lens as a schema migration system:
+
+- Lens is not bound to a particular deployment, programming language, or interaction method. It can be used globally and is accessible to clients regardless of their location or infrastructure.
+- Users can query on-demand even with different schema versions.
+- Migration between different schemas is a seamless process.
+
+## Disadvantages
+
+The Lens migration system also has some downsides to schema migration which include:
+
+- Using a Lazy execution approach, errors might be found later when querying through the migration.
+- There’s a time constraint as the Lens migration system is a work in progress +- The performance of the system is secondary, with more focus on overall functionality. + +## Future Outlook + +The core problem we currently have in the Lens schema migration system is the performance issues when migrating schemas, hence for future versions, the following would be considered: + +- Increasing the performance of the migration system. +- Making migrations easier to write. +- Expansion of the schema update system to include the removal of fields, not just adding fields. +- Enabling users to query the schema version of their choice on-demand. +- Support for Eager evaluation. +- Implementing dry run testing for development and branching scenarios, and handling divergent schemas. \ No newline at end of file diff --git a/docs/website/guides/schema-relationship.md b/docs/website/guides/schema-relationship.md new file mode 100644 index 0000000000..59745b60f2 --- /dev/null +++ b/docs/website/guides/schema-relationship.md @@ -0,0 +1,356 @@ +--- +sidebar_label: Schema Relationship Guide +sidebar_position: 50 +--- +# A Guide to Schema Relationship in DefraDB + +## Overview +Schema systems allow developers to enforce a structure on a given object type or database, which might be represented as rows in a SQL-based database or documents in a no SQL-based database. This enables developers to understand the structure of these objects so they can have type safety, structure safety, or the ability to enforce certain invariance or priorities syntactically or semantically. + +A developer can easily enforce two separate local schema types for two objects. However, many variables need to be properly handled when it comes to the mechanism of cross-schema relationships. For example, when creating relationships between instances of documents, the developer has to design these relationships in advance with certain considerations. 
+ +Different types of relationships exist between documents and schemas. It is generally categorized based on the number of types as follows: + +One-to-One Relationship: One document has a single reference to another document of a different type and vice versa. + +One-to-Many/Many-to-One Relationship: One document can be referenced by many documents. For example, an author has many books and each of these books refer to one author. + +Many-to-Many Relationship: Many-to-many allows developers to correlate a set of schema objects on one side to a whole set of schema objects on the other side. For example, a defined set of genres has a series of books. An intermediary relationship is created such that many correlated books have various genres. Conversely, various genres have many correlated books. Note: Many-to-many relationship is currently not supported by DefraDB, but it can be implemented through other techniques. + +The developer will design and structure these relationships within the actual data of the database. Conversely, with managed relationships, the database assumes some of the responsibility of designing and maintaining the data. It depends on how the developer designs the primary and foreign keys, and how they correlate from the respective relationship model. + +## How It Works + +DefraDB supports managed relationships but not unmanaged relationships, i.e., the database is responsible for accurately correlating and associating documents to their respective relationship types, primary keys, and foreign keys. The developer will be explicit about the kind of correlation they are choosing, i.e., one-to-one, one-to-many, or many-to-many; but does not have to be explicit in defining their schemas. However, the developer is not responsible for defining the field that manages the foreign keys, how that relates to the primary keys of the respective types, which side of the document is responsible for maintaining the relationship, etc. 
This is because the side that holds on to the foreign key is decided based on the type of relationship. In general, when querying over a relationship, the developer will define a join operation (which will allow querying from two separate tables or collections) and find a way to correlate the results into a single set of values. It should be noted that it is more efficient to query from the primary side to the secondary side. + +By default, for unmanaged databases (e.g., SQL model) that has normalized tables and uses a left join, the developer will define which field on which table correlates to which field on another table. This is not the case with managed databases like DefraDB, where a type join is used in place of any other join. Type join systems reduce the complexities when defining the row join or the field join as this is automatically handled by the database. + +Managing relationships for schemas is both easy and powerful in DefraDB. This is why in one-to-one relationships, DefraDB can automatically configure which side is the primary side of the relationship and can define how a developer queries different types. + +However, there are some shortcomings in how Defra handles these relationships. This is because, firstly, documents in Defra are self-describing, and secondly, content identifiers of the documents are used to create the primary keys. Eventually, it becomes a little different when compared to regular databases where a primary key is an auto-incrementing integer or a randomly generated UID. Therefore, because of the operations taking place between the documents and Merkle security, the developer must keep certain causality mechanisms in mind. An example for this is a primary relationship between an "author" and "book", where "author” is the primary side in that system. 
The developer will either know the doc key of the book before they create the relationship, or they will create a primary key i.e., "author", which will reference "book", and then update that document once they have created the "book" to build the relationship. + +When it comes to a one-to-many relationship, there is no primary side, i.e., the developer has no option of choosing which side is primary or  secondary. In this relationship type, the “many” type is the primary, and the “one” type is the secondary. Therefore, in the example of "author" to "books", if one "author" has many "books”, the book type holds the reference to the foreign key of the author type. This allows DefraDB to keep single fields on the respective type, otherwise "author" will have an array of values, thereby complicating the structure and breaking the normalization mechanism of databases. + +Note: When adding related types, the developer must add both types or all related types at the same time, i.e., define all the types within the Schema Definition Language (SDL), and send them as a single schema add operation, or the database will not understand the correlated types. + +With respect to filtering on related types, for both one-to-one and one-to-many relationships, the developer can filter on parent objects, which have different semantics than filtering on the child objects or the related object. Filtering on the parent object only returns the parent object if the related type matches the filter. However, filtering on the related type returns the parent regardless, but it won't return the related sub-type unless it matches the filter. For example, if we ask for authors that have books in a certain genre, it will only return authors with those sub-values for the books. If we apply a filter to the sub-type, it will return all the authors, but only return the books that match that filter. 
+ +Note: In managed relationships, the developer can also apply filtering on the values of the sub-types. + +Currently, DefraDB does not support many-to-many relationships as it relates to content-identifiable data structures, self-verifying documents, and other variables, making its implementation complicated. An intermediary table, often referred to as the junction table, is used to correlate the primary key of one side of the many to the primary key of the other side of the many. Also, when creating a relationship, there is implicit data created which becomes complicated for the purposes of self-describing, self-authenticated data structures, and privacy-preserving, ownership verification of data. + +Defra also does not support cascading deletes. In cascading deletes, if the developer deletes one side of a relationship, they can define a side effect or a cascade that will affect other documents, rows, or tables. While Defra does not support this feature currently, it may be included in a future version update. + +## Guidelines + +The following pointers provide a concrete guide on how to implement various definitions for the two managed relationship types: one-to-one and one-to-many, as well as the process of creating, updating and querying documents for the respective relationship types. + +### Guidelines for One-to-One Type + +1. Define the Schema and Add Types - Here, an example of two schema types - "user" and "address", where "user" has one "address" and "address" has one "user", thereby establishing a one-to-one relationship between them. The user type contains the name, age, and username, while the address type contains the street name, street number, and country. “user" is specified as the primary side of this relationship because it is more likely that the developer will query from a user to find their address rather than querying from an address to find its respective user. 
+ + Once these schemas are loaded into the database, it will automatically create the necessary foreign keys in the respective types. + +```graphql +type User { + name: String + username: String + age: Int + address: Address @primary +} + +type Address { + streetNumber: String + streetName: String + country: String + user: User +} +``` + +2. Create and Update Mutations - Creating documents in Defra is based on the nature of how these documents need to exist because of their content identifiable structure. The developer has to first create the primary side, then the secondary side, and then update the primary side. + + In the above example, the developer will first create the "user". This is the first mutation. + +```graphql +mutation { + create_Address(input: {streetNumber: "123", streetName: "Test road", country: "Canada"}) { + _key + } +} +``` + +```graphql +mutation { + create_User(input: {name: "Alice", username: "awesomealice", age: 35, address_id: "bae-fd541c25-229e-5280-b44b-e5c2af3e374d"}) { + _key + } +} +``` + +Note: Currently, the developer must create the secondary side of the relation (`Address`) first followed by the primary side with the secondary id (`address_id`) included, but in a future version of Defra, this can be done in either order. + +3. Querying Types - After creating the required documents, the developer has to send a query request from the primary side. Therefore, in the above example, it will ask for the three respective fields of the "user", and it will also have the embedded address type in the selection set. As the developer will query from the "user" into the "address", and as defined above, the "user" is the primary type, this lookup of "user" into "address" will be an efficient lookup that will only require a single point. A single point lookup means that it won't incur a table scan. 
This is explained in the query below: + +```graphql +query { + User { + name + username + age + Address { + streetNumber + streetName + country + } + } +} + +``` + +```graphql +query { + Address { + streetNumber + streetName + country + User { + name + username + age + } + } +} + +``` + +```graphql +query { + User (filter: {Address: {country: "Canada"}}) { + name + username + age + Address { + streetNumber + streetName + country + } + } +} + +``` + +Going from the secondary into the primary will be a more expensive query operation because it requires a table scan looking for the correlated user for this address. + +Note: Defra supports queries from both sides, regardless of which side is the primary or secondary, i.e., the developer can query in the reverse direction. + +## Guidelines for One-to-Many Type + +1. Define the Schema and Add Types - For the one-to-many relationship, two types are defined, for example, "author" and "book". The author type has a name, a birth date, and authored books. This is going to be a one-to-many relationship into the book type. The book type has a name, a description, a single genre string, and the author to which it is related. So "author" is the one, and the "book" is the many. + + +```graphql +# schema.graphql + +type Author { + name: String + dateOfBirth: DateTime + authoredBooks: [Book] +} + +type Book { + name: String + description: String + genre: String + author: Author +} + +``` + + +```bash +defradb client schema add -f schema.graphql +``` + +2. Create Documents - In this step, first the "one" type from the one-to-many type needs to be created. Therefore, in the above-mentioned example, a blank author type will be created. Once "author" is created, then the related books published by the author will be created. + + Note: Currently Defra only supports creating one type at a time, but the developer can repeat this as many times as required. 
+ +```graphql +mutation { + create_Author(input: {name: "Saadi", dateOfBirth: "1210-07-23T03:46:56.647Z"}) { + _key + } +} +``` + + +```graphql +mutation { + create_Book(input: {name: "Gulistan", genre: "Poetry", author_id: "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4"}) { + _key + } +} +``` + + +```graphql +mutation { + update_Author(id: "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", input: {name: "Saadi Shirazi"}) { + _key + } +} +``` + + +```graphql +mutation { + update_Book(filter: {name: {_eq: "Gulistan"}}, input: {description: "Persian poetry of ideas"}) { + _key + } +} +``` +This demonstrates that the developer should define or correlate the two types "author" and "book" using the same property of querying from the primary type to the secondary type and that in a one-to-many relationship, the "many" side is always the primary. This means that the developer has to store the related ID on the primary side, i.e., the "many" side. So, the "book" in the author-book relationship needs to hold onto the ID of the related type, i.e., the author ID. + +Note: The developer can create as many books they require by using this pattern. + +3. Querying Types - There are two directions in which the developer can run a query, i.e., secondary-to-primary or primary-to-secondary. In the first example, we are sending a query request from the "author", i.e., the secondary. It asks for all the authors in the author collection, their names, their ages, and the other fields that are on the author type, including the related book field. This book field forms the "many" end of the relationship and based on how it is defined under Point 1 above, is an array of book. Hence, it returns multiple values as an array as opposed to returning a single object, as in the case of one-to-one relationships. + + As a result, on querying a related type for a one-to-many, the developer can also sub-filter the related type. 
In other words, if an author has a certain number of books, we can filter the author by their name but return the books that are part of a particular genre. So, in the author-to-book direction, the developer can filter on two different levels - filter on the top level of the actual author type of the collection or filter on the book level - both having two different implications. This is further explained in the Query Specifications document. + +```graphql +query { + Author { + name + dateOfBirth + authoredBooks { + name + genre + description + } + } +} +``` + + +```json +// Results: +[ + { + "name": "Saadi Shirazi", + "dateOfBirth": "1210-07-23T03:46:56.647Z", + "authoredBooks": [ + { + "name": "Gulistan", + "genre": "Poetry", + "description": "Persian poetry of ideas" + }, + { + "name": "Bustan", + "genre": "Poetry" + } + ] + } +] +``` + +```graphql +query { + Book { + name + genre + Author { + name + dateOfBirth + } + } +} +``` + +```json +// Results: +[ + { + "name": "Gulistan", + "genre": "Poetry", + "Author": { + "name": "Saadi Shirazi", + "dateOfBirth": "1210-07-23T03:46:56.647Z", + } + }, + { + "name": "Bustan", + "genre": "Poetry", + "Author": { + "name": "Saadi Shirazi", + "dateOfBirth": "1210-07-23T03:46:56.647Z", + } + } +] +``` + +```graphql +query { + Author { + name + dateOfBirth + authoredBooks(filter: {name: {_eq: "Gulistan"}}) { + name + genre + } + } +} +``` + +```json +// Results: +[ + { + "name": "Saadi Shirazi", + "dateOfBirth": "1210-07-23T03:46:56.647Z", + "authoredBooks": [ + { + "name": "Gulistan", + "genre": "Poetry" + } + ] + } +] +``` + +```graphql +query { + # Filters on the parent object can reference child fields + # even if they are not requested. 
+ Author(filter: {authoredBooks: {name: {_eq: "Gulistan"}}}) { + name + dateOfBirth + } +} +``` + +```json +// Results: +[ + { + "name": "Saadi Shirazi", + "dateOfBirth": "1210-07-23T03:46:56.647Z" + } +] +``` + +Note: +The book-to-author query is included to demonstrate that the developer can query from both sides of the relationship. + +The various mechanisms, filtering, and rendering properties of one-to-one apply to this section as well. Also, the ability to filter on related types in one-to-many also applies to one-to-one relationships. + +## Current Limitations and Future Outlook + +The notable deficiencies of the current system are as follows: + +* It does not support many-to-many relationships. + +* It requires multiple mutations to sort all the related types. + +* It does not support random joins, i.e., currently, unmanaged relationships are not supported. + +The above limitations will be eliminated in future version updates of DefraDB. Our team is also working on secondary indexes, where the developer can make queries from either side, thereby improving the performance of querying from the secondary into the primary. This is almost as efficient as querying from the primary side using point lookup as opposed to a table scan \ No newline at end of file diff --git a/docs/website/guides/time-traveling-queries.md b/docs/website/guides/time-traveling-queries.md new file mode 100644 index 0000000000..0c952861bd --- /dev/null +++ b/docs/website/guides/time-traveling-queries.md @@ -0,0 +1,60 @@ +--- +sidebar_label: Time Traveling Queries Guide +sidebar_position: 40 +--- +# A Guide to Time Traveling Queries in DefraDB + +## Overview +Time Traveling queries allow users to query previous states of documents within the query interface. Essentially, it returns data as it had appeared at a specific commit. 
This is a powerful tool as it allows users to inspect and verify arbitrary states and time regardless of the number of updates made or who made these updates if the user has the current state. Since a current state is always going to be based on some previous state and that previous state is going to be based on another previous state, hence time-traveling queries provide the ability to “go back in time” and look at previous states with minimal changes to the working of the query. A special quality of this query is that there is minimal distinction between a regular query run versus a time-traveling query since both apply almost the same logic to fetch the result of the query. + +## Background + +The Web2 stack has traditional databases, like Postgres or MySQL, that usually have the current state as the only state. Once a user makes an update, the previous state is overwritten. There is no way to retrieve it from the system, unless a snapshot is captured, which exists as an independent file in the backup. The only way to access previous states is by loading the backup onto the database and querying the previous state. Additionally, in traditional systems, this backup occurs only once every hour, once a day, or once a month. This results in a loss of the ability to introspect each update made in the database. Here, the time travel inquiry system provides an edge over the existing databases as the data model of this system is independent of the mechanism of creating snapshots or backups that a user would utilize as part of natural maintenance and administration. The data model of time-traveling queries is such that every update is a function of all the previous updates. Essentially, there is no difference between inspecting the state of a document at a present point in time versus a previous point since the previous state is a function of the document graph. 
+ +## Usage + +A powerful feature of a time-traveling query is that very little work is required from the developer to turn a traditional non-time-traveling query into a time-traveling query. Each update a document goes through gets a version identifier known as a Content Identifier (CID). CIDs are a function of the data model and are used to build out the time travel queries. These CIDs can be used to refer to a version that contains some piece of data. Instead of using some sort of human-invented notion of semantic version labels like Version 1, or Version 3.1 alpha, it uses the hash of the data as the actual identifier. The user can take the entire state of a document and create a single constant-sized CID. Each update in the document produces a new version number for the document, including a new version number for its individual fields. The developer then only needs to submit a new time-traveling query using the doc key of the document that it wants to query backward through its state, just like in a regular query, only here the developer needs to add the 32-bit hexadecimal version identifier that is expressed as it’s CID in an additional argument and the query will fetch the specific update that was made in the document. + +```graphql +# Here we fetch a User of the given dockey, in the state that it was at +# at the commit matching the given CID. +query { + User ( + cid: "bafybeieqnthjlvr64aodivtvtwgqelpjjvkmceyz4aqerkk5h23kjoivmu", + dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + ) { + name + age + } +} +``` + +## How It Works + +The mechanism behind time-traveling queries is based on the Merkel CRDT system and the data model of the documents discussed in the above sections. Each time a document is updated, a log of updates known as the Update graph is recorded. This graph consists of every update that the user makes in the document from the beginning till the Nth update. 
In addition to the document update graph, we also have an independent and individual update graph for each field of the document. The document update graph would capture the overall updates made in the document whereas the independent and individual update graphs would capture the changes made to a specific field of the document. The data model as discussed in the Usage section works in a similar fashion, where it keeps appending the updates of the document to its present state. So even if a user deletes any information in the document, this will be recorded as an update within the update graph. Hence, no information gets deleted from the document, as all updates are stored in the update graph. + +[Include link to CRDT doc here] + +Since we now have this update graph of changes, the query also takes its mechanism from the inherent properties of the Delta State Merkel CRDTs. Under this, the actual content of the update added by the user in the document is known as the Delta Payload. This delta payload is the amount of information that a user wants to go from a previous state to the very next state, where the value of the next state is set by some other user. For example, suppose a team of developers is working on a document and one of them wants to change the name of the document, then in this case, the delta payload of the new update would be the name of the document set by that user. Hence, the time-traveling queries work on two core concepts, the appending update graph and the delta payload which contains information that is required to go from the previous state to the next state. With both of these, whenever a user submits a regular query, the query caches the present state of the document within the database. And we internally issue a time-traveling query for the current state, with the only upside being that the user can submit a non-time-traveling query faster since a cached version of the same is already stored in the database. 
Thus, using this cached version of the present state of the document, the user can apply a time-traveling query using the CID of the specific version they want to query in the document. The database will then set the CID provided by the user as the Target State, a state at which the query will stop and go back to the beginning of the document, known as the Genesis State. The query will then apply all its operations until it reaches back to the Target State. + +The main reason behind setting a Target state is because the Merkel CRDT is a single-direction graph, and it only points backward in time. But to apply all the updates of all the delta payloads from the genesis to the target state, we need the query to track the existence of the target state as the present state of the target version can be a function of multiple operations. We thus perform a two-step process where it starts from the target version, goes to the genesis state, and comes back to the version. And from this, we produce the current present or the actual external facing state, also known as the serialized state. + +## Limitations + +1. Relational Limitation: A user will not be able to apply a time-traveling query to a series of related documents relating to the document that they are applying the query. For example, a person has some books and a list of their respective authors. An author can have many books under their name, but one book can be associated with one author only. Now, if a user applies a time-traveling to a specific version at some point in time of a particular book, it will only be able to query the state of that book and not the related state of its correlated author. A regular query, on the other hand, can go in-depth and present its values or get the state of the book and its correlated author. However, with the time-traveling query, the user will not be able to query beyond the exact state to which the query is applied. + +2. 
Performance Limitation: As discussed earlier, the present state is stored as a cached version in the database, and based on this cached version, the current present state is computed. Hence the performance of the query depends on two factors:
+
+    a. The size of the update graph of the document
+
+    b. The number of updates that are between the Genesis state and the Target state.
+
+The larger the number of updates that exist between the Genesis state and the Target state, the longer it is going to take for the query to go back to the genesis state, perform its operations and come back to the target state. And hence, the time taken by the query to provide results would increase in proportion to the number of updates that are present between these two states.
+
+
+## Future Outlook
+
+The future outlook for time-traveling queries focuses mainly on resolving the current limitations that this query faces. To navigate the relational limitation, the current data model being used by the time-traveling query needs to be exposed to the underlying aspects of the Merkle CRDT data model. Here, taking the help of the example mentioned for this limitation in the previous section, the relationship between the author and their book can be expressed by using doc keys of two types. Therefore, book A, which has a particular doc key (doc key A) to represent it, and this book has its author B, will be represented by its different doc key (doc key B). So, whenever the user correlates that book A was published by author B, the user relates the doc keys in the relationship field of the update graph.
+
+For the performance limitations, snapshots can be the next step toward the elimination of the performance limitation. Currently, we keep a cached version of the present or the current state of the document. Once an update is made, this cached version will get replaced with the current version. 
Therefore, at any given point in time, there would only be a single cached version of the current state. However, developers can choose to trade this space to decrease the time taken for the query execution. This can be achieved by creating snapshots at various points in the update history. For a document undergoing millions of updates, snapshots can be taken at every 1000th update and a cached version of this snapshot can be created such that if we need to query the 2000th update, we just need to go back to the closest snapshot instead of having to go back to the Genesis state and then moving 2000 states to get to the Target state. For example, if we need to query the 1010th update, then we only need to execute 10 steps backward and 10 steps forward from the cached update, i.e., the 1000th update. Therefore, depending on the interval of the cache set by the user, for every 'x' number of updates, they would be required to execute 'x' number of steps after the closest cached version of the snapshot. + diff --git a/docs/website/references/_category_.json b/docs/website/references/_category_.json new file mode 100644 index 0000000000..cca3d1e4a7 --- /dev/null +++ b/docs/website/references/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "References", + "position": 4 + } + \ No newline at end of file diff --git a/docs/website/references/cli/_category_.json b/docs/website/references/cli/_category_.json new file mode 100644 index 0000000000..53661360ff --- /dev/null +++ b/docs/website/references/cli/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "CLI Specification", + "position": 2 +} \ No newline at end of file diff --git a/docs/website/references/cli/defradb.md b/docs/website/references/cli/defradb.md new file mode 100644 index 0000000000..3a38cb52a0 --- /dev/null +++ b/docs/website/references/cli/defradb.md @@ -0,0 +1,36 @@ +# defradb + +DefraDB Edge Database + +## Synopsis + +DefraDB is the edge database to power the user-centric future. 
+ +Start a database node, issue a request to a local or remote node, and much more. + +DefraDB is released under the BSL license, (c) 2022 Democratized Data Foundation. +See https://docs.source.network/BSL.txt for more information. + + +## Options + +``` + -h, --help help for defradb + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb init](defradb_init.md) - Initialize DefraDB's root directory and configuration file +* [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database +* [defradb start](defradb_start.md) - Start a DefraDB node +* [defradb version](defradb_version.md) - Display the version information of DefraDB and its components + diff --git a/docs/website/references/cli/defradb_client.md b/docs/website/references/cli/defradb_client.md new file mode 100644 index 0000000000..81656acca2 --- /dev/null +++ b/docs/website/references/cli/defradb_client.md @@ -0,0 +1,39 @@ +# client + +Interact with a running DefraDB node as a client + +## Synopsis + +Interact with a running DefraDB node as a client. +Execute queries, add schema types, and run debug routines. + +## Options + +``` + -h, --help help for client +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. 
Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb](defradb.md) - DefraDB Edge Database +* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore +* [defradb client dump](defradb_client_dump.md) - Dump the contents of a database node-side +* [defradb client peerid](defradb_client_peerid.md) - Get the peer ID of the DefraDB node +* [defradb client ping](defradb_client_ping.md) - Ping to test connection to a node +* [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance + diff --git a/docs/website/references/cli/defradb_client_blocks.md b/docs/website/references/cli/defradb_client_blocks.md new file mode 100644 index 0000000000..9f1a50f296 --- /dev/null +++ b/docs/website/references/cli/defradb_client_blocks.md @@ -0,0 +1,28 @@ +# client blocks + +Interact with the database's blockstore + +## Options + +``` + -h, --help help for blocks +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore. + diff --git a/docs/website/references/cli/defradb_client_blocks_get.md b/docs/website/references/cli/defradb_client_blocks_get.md new file mode 100644 index 0000000000..2ddfcb8453 --- /dev/null +++ b/docs/website/references/cli/defradb_client_blocks_get.md @@ -0,0 +1,31 @@ +# client blocks get + +Get a block by its CID from the blockstore. + +``` +defradb client blocks get [CID] [flags] +``` + +## Options + +``` + -h, --help help for get +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore + diff --git a/docs/website/references/cli/defradb_client_dump.md b/docs/website/references/cli/defradb_client_dump.md new file mode 100644 index 0000000000..fdc2a38a3b --- /dev/null +++ b/docs/website/references/cli/defradb_client_dump.md @@ -0,0 +1,31 @@ +# client dump + +Dump the contents of a database node-side + +``` +defradb client dump [flags] +``` + +## Options + +``` + -h, --help help for dump +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client + diff --git a/docs/website/references/cli/defradb_client_peerid.md b/docs/website/references/cli/defradb_client_peerid.md new file mode 100644 index 0000000000..cf3f175646 --- /dev/null +++ b/docs/website/references/cli/defradb_client_peerid.md @@ -0,0 +1,31 @@ +# client peerid + +Get the peer ID of the DefraDB node + +``` +defradb client peerid [flags] +``` + +## Options + +``` + -h, --help help for peerid +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client + diff --git a/docs/website/references/cli/defradb_client_ping.md b/docs/website/references/cli/defradb_client_ping.md new file mode 100644 index 0000000000..6115c5f493 --- /dev/null +++ b/docs/website/references/cli/defradb_client_ping.md @@ -0,0 +1,31 @@ +# client ping + +Ping to test connection to a node + +``` +defradb client ping [flags] +``` + +## Options + +``` + -h, --help help for ping +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client + diff --git a/docs/website/references/cli/defradb_client_query.md b/docs/website/references/cli/defradb_client_query.md new file mode 100644 index 0000000000..b602633b78 --- /dev/null +++ b/docs/website/references/cli/defradb_client_query.md @@ -0,0 +1,46 @@ +# client query + +Send a DefraDB GraphQL query request + +## Synopsis + +Send a DefraDB GraphQL query request to the database. + +A query request can be sent as a single argument. Example command: +defradb client query 'query { ... }' + +Or it can be sent via stdin by using the '-' special syntax. Example command: +cat request.graphql | defradb client query - + +A GraphQL client such as GraphiQL (https://github.com/graphql/graphiql) can be used to interact +with the database more conveniently. + +To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network. + +``` +defradb client query [query request] [flags] +``` + +## Options + +``` + -h, --help help for query +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client + diff --git a/docs/website/references/cli/defradb_client_rpc.md b/docs/website/references/cli/defradb_client_rpc.md new file mode 100644 index 0000000000..1044e78c2e --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc.md @@ -0,0 +1,34 @@ +# client rpc + +Interact with a DefraDB gRPC server + +## Synopsis + +Interact with a DefraDB gRPC server. + +## Options + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + -h, --help help for rpc +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system + diff --git a/docs/website/references/cli/defradb_client_rpc_addreplicator.md b/docs/website/references/cli/defradb_client_rpc_addreplicator.md new file mode 100644 index 0000000000..a7c5c9fe7a --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_addreplicator.md @@ -0,0 +1,37 @@ +# client rpc addreplicator + +Add a new replicator + +## Synopsis + +Use this command if you wish to add a new target replicator +for the p2p data sync system. + +``` +defradb client rpc addreplicator [flags] +``` + +## Options + +``` + -h, --help help for addreplicator +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server + diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection.md new file mode 100644 index 0000000000..37edd5e76d --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_p2pcollection.md @@ -0,0 +1,35 @@ +# client rpc p2pcollection + +Interact with the P2P collection system + +## Synopsis + +Add, delete, or get the list of P2P collections + +## Options + +``` + -h, --help help for p2pcollection +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server +* [defradb client rpc p2pcollection add](defradb_client_rpc_p2pcollection_add.md) - Add P2P collections +* [defradb client rpc p2pcollection getall](defradb_client_rpc_p2pcollection_getall.md) - Get all P2P collections +* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Remove P2P collections + diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md new file mode 100644 index 0000000000..902ff41cb7 --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md @@ -0,0 +1,36 @@ +# client rpc p2pcollection add + +Add P2P collections + +## Synopsis + +Use this command if you wish to add new P2P collections to the pubsub topics + +``` +defradb client rpc p2pcollection add [collectionID] [flags] +``` + +## Options + +``` + -h, --help help for add +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system + diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md new file mode 100644 index 0000000000..92d53377c0 --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md @@ -0,0 +1,36 @@ +# client rpc p2pcollection getall + +Get all P2P collections + +## Synopsis + +Use this command if you wish to get all P2P collections in the pubsub topics + +``` +defradb client rpc p2pcollection getall [flags] +``` + +## Options + +``` + -h, --help help for getall +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system + diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md new file mode 100644 index 0000000000..9f8214dc46 --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md @@ -0,0 +1,36 @@ +# client rpc p2pcollection remove + +Remove P2P collections + +## Synopsis + +Use this command if you wish to remove P2P collections from the pubsub topics + +``` +defradb client rpc p2pcollection remove [collectionID] [flags] +``` + +## Options + +``` + -h, --help help for remove +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system + diff --git a/docs/website/references/cli/defradb_client_rpc_replicator.md b/docs/website/references/cli/defradb_client_rpc_replicator.md new file mode 100644 index 0000000000..cdb87fad32 --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_replicator.md @@ -0,0 +1,37 @@ +# client rpc replicator + +Interact with the replicator system + +## Synopsis + +Add, delete, or get the list of persisted replicators + +## Options + +``` + -c, --collection stringArray Define the collection for the replicator + -f, --full Set the replicator to act on all collections + -h, --help help for replicator +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server +* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator +* [defradb client rpc replicator getall](defradb_client_rpc_replicator_getall.md) - Get all replicators +* [defradb client rpc replicator set](defradb_client_rpc_replicator_set.md) - Set a P2P replicator + diff --git a/docs/website/references/cli/defradb_client_rpc_replicator_delete.md b/docs/website/references/cli/defradb_client_rpc_replicator_delete.md new file mode 100644 index 0000000000..392481b3e9 --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_replicator_delete.md @@ -0,0 +1,37 @@ +# client rpc replicator delete + +Delete a replicator + +## Synopsis + +Use this command if you wish to remove the target replicator +for the p2p data sync system. + +``` +defradb client rpc replicator delete [-f, --full | -c, --collection] [flags] +``` + +## Options + +``` + -h, --help help for delete +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system + diff --git a/docs/website/references/cli/defradb_client_rpc_replicator_getall.md b/docs/website/references/cli/defradb_client_rpc_replicator_getall.md new file mode 100644 index 0000000000..79d891a5c9 --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_replicator_getall.md @@ -0,0 +1,37 @@ +# client rpc replicator getall + +Get all replicators + +## Synopsis + +Use this command if you wish to get all the replicators +for the p2p data sync system. + +``` +defradb client rpc replicator getall [flags] +``` + +## Options + +``` + -h, --help help for getall +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system + diff --git a/docs/website/references/cli/defradb_client_rpc_replicator_set.md b/docs/website/references/cli/defradb_client_rpc_replicator_set.md new file mode 100644 index 0000000000..5b94f1a1ab --- /dev/null +++ b/docs/website/references/cli/defradb_client_rpc_replicator_set.md @@ -0,0 +1,39 @@ +# client rpc replicator set + +Set a P2P replicator + +## Synopsis + +Use this command if you wish to add a new target replicator +for the p2p data sync system or add schemas to an existing one + +``` +defradb client rpc replicator set [-f, --full | -c, --collection] [flags] +``` + +## Options + +``` + -c, --collection stringArray Define the collection for the replicator + -f, --full Set the replicator to act on all collections + -h, --help help for set +``` + +## Options inherited from parent commands + +``` + --addr string gRPC endpoint address (default "0.0.0.0:9161") + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system + diff --git a/docs/website/references/cli/defradb_client_schema.md b/docs/website/references/cli/defradb_client_schema.md new file mode 100644 index 0000000000..140c7fe635 --- /dev/null +++ b/docs/website/references/cli/defradb_client_schema.md @@ -0,0 +1,33 @@ +# client schema + +Interact with the schema system of a running DefraDB instance + +## Synopsis + +Make changes, updates, or look for existing schema types to a DefraDB node. + +## Options + +``` + -h, --help help for schema +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client schema add](defradb_client_schema_add.md) - Add a new schema type to DefraDB +* [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type + diff --git a/docs/website/references/cli/defradb_client_schema_add.md b/docs/website/references/cli/defradb_client_schema_add.md new file mode 100644 index 0000000000..0909eb5789 --- /dev/null +++ b/docs/website/references/cli/defradb_client_schema_add.md @@ -0,0 +1,47 @@ +# client schema add + +Add a new schema type to DefraDB + +## Synopsis + +Add a new schema type to DefraDB. + +Example: add from an argument string: + defradb client schema add 'type Foo { ... }' + +Example: add from file: + defradb client schema add -f schema.graphql + +Example: add from stdin: + cat schema.graphql | defradb client schema add - + +To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network. + +``` +defradb client schema add [schema] [flags] +``` + +## Options + +``` + -f, --file string File to load a schema from + -h, --help help for add +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance + diff --git a/docs/website/references/cli/defradb_client_schema_patch.md b/docs/website/references/cli/defradb_client_schema_patch.md new file mode 100644 index 0000000000..307e7b83fa --- /dev/null +++ b/docs/website/references/cli/defradb_client_schema_patch.md @@ -0,0 +1,49 @@ +# client schema patch + +Patch an existing schema type + +## Synopsis + +Patch an existing schema. + +Uses JSON PATCH formatting as a DDL. + +Example: patch from an argument string: + defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' + +Example: patch from file: + defradb client schema patch -f patch.json + +Example: patch from stdin: + cat patch.json | defradb client schema patch - + +To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network. + +``` +defradb client schema patch [schema] [flags] +``` + +## Options + +``` + -f, --file string File to load a patch from + -h, --help help for patch +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance + diff --git a/docs/website/references/cli/defradb_init.md b/docs/website/references/cli/defradb_init.md new file mode 100644 index 0000000000..5b7f2071ce --- /dev/null +++ b/docs/website/references/cli/defradb_init.md @@ -0,0 +1,36 @@ +# init + +Initialize DefraDB's root directory and configuration file + +## Synopsis + +Initialize a directory for configuration and data at the given path. + +``` +defradb init [flags] +``` + +## Options + +``` + -h, --help help for init + --reinitialize Reinitialize the configuration file + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb](defradb.md) - DefraDB Edge Database + diff --git a/docs/website/references/cli/defradb_server-dump.md b/docs/website/references/cli/defradb_server-dump.md new file mode 100644 index 0000000000..91641d1125 --- /dev/null +++ b/docs/website/references/cli/defradb_server-dump.md @@ -0,0 +1,32 @@ +# server-dump + +Dumps the state of the entire database + +``` +defradb server-dump [flags] +``` + +## Options + +``` + -h, --help help for server-dump + --store string Datastore to use. Options are badger, memory (default "badger") +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb](defradb.md) - DefraDB Edge Database + diff --git a/docs/website/references/cli/defradb_start.md b/docs/website/references/cli/defradb_start.md new file mode 100644 index 0000000000..d393f7f3a5 --- /dev/null +++ b/docs/website/references/cli/defradb_start.md @@ -0,0 +1,46 @@ +# start + +Start a DefraDB node + +## Synopsis + +Start a new instance of DefraDB node. 
+ +``` +defradb start [flags] +``` + +## Options + +``` + --email string Email address used by the CA for notifications (default "example@example.com") + -h, --help help for start + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr string Listener address for the p2p network (formatted as a libp2p MultiAddr) (default "/ip4/0.0.0.0/tcp/9171") + --peers string List of peers to connect to + --privkeypath string Path to the private key for tls (default "certs/server.crt") + --pubkeypath string Path to the public key for tls (default "certs/server.key") + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --tcpaddr string Listener address for the tcp gRPC server (formatted as a libp2p MultiAddr) (default "/ip4/0.0.0.0/tcp/9161") + --tls Enable serving the API over https + --valuelogfilesize ByteSize Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1GiB) +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb](defradb.md) - DefraDB Edge Database + diff --git a/docs/website/references/cli/defradb_version.md b/docs/website/references/cli/defradb_version.md new file mode 100644 index 0000000000..de817ade10 --- /dev/null +++ b/docs/website/references/cli/defradb_version.md @@ -0,0 +1,33 @@ +# version + +Display the version information of DefraDB and its components + +``` +defradb version [flags] +``` + +## Options + +``` + -f, --format string Version output format. Options are text, json + --full Display the full version information + -h, --help help for version +``` + +## Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +## SEE ALSO + +* [defradb](defradb.md) - DefraDB Edge Database + diff --git a/docs/website/references/query-specification/_category_.json b/docs/website/references/query-specification/_category_.json new file mode 100644 index 0000000000..15871c9ae9 --- /dev/null +++ b/docs/website/references/query-specification/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Query specification", + "position": 1 +} \ No newline at end of file diff --git a/docs/website/references/query-specification/aggregate-functions.md b/docs/website/references/query-specification/aggregate-functions.md new file mode 100644 index 0000000000..a22690c665 --- /dev/null +++ b/docs/website/references/query-specification/aggregate-functions.md @@ -0,0 +1,47 @@ +--- +sidebar_label: Aggregate Functions +sidebar_position: 115 +--- + +# Aggregate Functions + +The most common use case of grouping queries is to compute some aggregate function over the sub-group. Like the special `_group` field, aggregate functions are defined and returned using special fields. These fields prefix the target field name with the name of the aggregate function you wish to apply. If we had the field `rating`, we could access the average value of all sub-group ratings by including the special field `_avg { rating }` in our return object. The available aggregate functions and their associated scalars can be found above in `Table 3`. + +The special aggregate function fields' format is the function name and the field name as its sub-elements. 
Specifically: `_$function { $field }`, where `$function` is the list of functions from `Table 3`, and `$field` is the field name to which the function will be applied. E.g., applying the `max` function to the `rating` field becomes `_max { rating }`.
+
+Let us augment the previous grouped books by genre example and include an aggregate function on the sub-groups' ratings.
+```graphql
+{
+  Books(filter: {author: {name: {_like: "John%"}}}, groupBy: [genre]) {
+    genre
+    _avg {
+      rating
+      points
+    }
+    _group {
+      title
+      rating
+    }
+  }
+}
+```
+
+Here we return the average of all the ratings of the books whose author's name begins with "John" grouped by the genres.
+
+We can also use simpler queries, without any `groupBy` clause, and still use aggregate functions. The difference is, instead of applying the aggregate function to only the sub-group, it applies it to the entire result set.
+
+Let's simply count all the objects returned by a given filter.
+```graphql
+{
+  _count(Books: {filter: {rating: {_gt: 3.5}}})
+}
+```
+This returns an array of objects that includes the respective book's title, along with the repeated `_count` field, which is the total number of objects that match the filter.
+
+> Note, the special aggregate field `_count` has no subfields selected, so instead of applying the `count` function to a field, it applies to the entire object. This is only possible with the `count` function; all the other aggregate functions must specify their target field using the correct field name selection.
+
+We can further simplify the above count query by including only the `_count` field. If we ***only*** return the `_count` field, then a single object is returned, instead of an array of objects.
+
+DefraDB also supports applying aggregate functions to relations just like we do fields. However, only the `count` function is available directly on the related object type.
+ + diff --git a/docs/website/references/query-specification/aliases.md b/docs/website/references/query-specification/aliases.md new file mode 100644 index 0000000000..acd5db124d --- /dev/null +++ b/docs/website/references/query-specification/aliases.md @@ -0,0 +1,51 @@ +--- +sidebar_label: Aliases +sidebar_position: 90 +--- +# Aliases + +If the structure of a returned query is not ideal for a given application, you can rename fields and entire query results to suit your use case. This is particularly useful, and sometimes necessary when using multiple queries within a single request. + +```graphql +{ + topTenBooks: Books(sort: {rating: DESC}, limit: 10) { + title + genre + description + } +} +``` + +In the above example, the books result is renamed to `topTenBooks`, which can be useful for semantic reasoning about the request, and for organizational purposes. It is suggested in production deployments to name your queries properly. + +```graphql +{ + topTenBooks: Books(sort: {rating: DESC}, limit: 10) { + title + genre + description + } + + bottomTenBooks: Books(sort: {rating: ASC}, limit: 10) { + title + genre + description + } +} +``` + +In this query the two returned results are named `topTenBooks` and `bottomTenBooks` respectively. When dealing with multiple queries of the same type (e.g., `books`), it is required to alias one from another. + +Additionally, we can alias individual fields within our returned types. Aliasing a field works the same way as aliasing a query. + +```graphql +{ + Books { + name: title + genre + description + } +} +``` + +In the above example, we have renamed the `title` field to `name`. Unlike query aliases, there is no requirement in any context because name collisions are impossible within a defined query return type. 
diff --git a/docs/website/references/query-specification/collections.md b/docs/website/references/query-specification/collections.md new file mode 100644 index 0000000000..08003c5a31 --- /dev/null +++ b/docs/website/references/query-specification/collections.md @@ -0,0 +1,9 @@ +--- +sidebar_label: Collections +sidebar_position: 40 +--- +# Collections + +A **collection** is like a SQL table, which represents a group of documents with similar structures. Every developer-defined type is attached to a collection. + +Each collection gets an auto-generated query field, allowing users and developers to query, filter, select, and interact with documents in different ways. \ No newline at end of file diff --git a/docs/website/references/query-specification/database-api.md b/docs/website/references/query-specification/database-api.md new file mode 100644 index 0000000000..e2cec7d671 --- /dev/null +++ b/docs/website/references/query-specification/database-api.md @@ -0,0 +1,113 @@ +--- +sidebar_label: Database API +sidebar_position: 160 +--- +# Database API + +So far, all the queries and mutations that have been discussed were specific to the stored and managed developer or user-created objects. However, that is only one aspect of DefraDB's GraphQL API. The other part of DefraDB GraphQL API is the auxiliary APIs, which include MerkleCRDT Traversal, Schema Management, and more. + +## MerkleCRDTs + +All objects in DefraDB are stored in MerkleCRDTs (see [Merkle CRDT Guide](../../guides/merkle-crdt.md)). These MerkleCRDTs are represented as a series of small updates connected in a MerkleDAG. The MerkleDAG is a Merklized version of a DAG (Directed Acyclical Graph), which means that each node in the DAG references a parent node through some kind of Content Identifier (CID). + +The image below shows an example structure of a MerkleDAG. 
+ +![](https://mvpworkshop.co/wp-content/uploads/2021/01/ipfs-inarticle7.jpeg) + +The `Head` CID represents the "current" or "latest" state of a MerkleDAG. + +DefraDB allows you to query, traverse, and validate the DAG structure, allowing for self-verifying data structures. + +In DefraDB Database API, DAG nodes are represented as `Commit`, `CommitLink`, and `Delta` types. They are defined as shown below: + +```graphql +# Commit is an individual node in a CRDTs MerkleDAG +type Commit { + cid: String // cid is the Content Identifier of this commit + height: Int // height is the incremental version of the current commit + delta: Delta // delta is the delta-state update generated by a CRDT mutation + previous: [Commit] // previous is the links to the previous node in the MerkleDAG + links: [CommitLink] // links are any additional commits this commit may reference. +} + +# CommitLink is a named link to a commit +type CommitLink { + name: String // name is the name of the CommitLink + commit: Commit // commit is the linked commit +} + +# Delta is the differential state change from one node to another +type Delta { + payload: String // payload is a base64 encoded byte-array. +} +``` + +To query the latest commit of an object (with id: '123'): +```graphql +query { + latestCommits(docid: "123") { + cid + height + delta { + payload + } + } +} +``` + +To query all the commits of an object (with id: '123'): +```graphql +query { + allCommits(docid: "123") { + cid + height + delta { + payload + } + } +} +``` + +To query a specific commit: +```graphql +query { + Commits(cid: 'Qm123') { + cid + height + delta { + payload + } + } +} +``` + +In addition to using `Commits` specific queries, include commit version sub-fields in object queries. + +```graphql +query { + User { + _key + name + age + + _version { + cid + height + } + } +} +``` + +The above example shows how to query for the additional `_version` field that is generated automatically for each added schema type. 
The `_version` has the same execution as `latestCommits`.
+
+Both `_version` and `latestCommits` return an array of `Commits` types because the `HEAD` of the MerkleDAG can point to more than one DAG node. This is caused by two concurrent updates to the DAG at the same height. The DAG usually has a single head. However, it can also have multiple heads.
+
+Commits queries also work with aggregates, grouping, limit, offset, order, dockey, cid, and depth.
+There is a `__typename` introspection keyword that works on all queries; it does not appear to be documented anywhere, for example:
+
+```graphql
+commits(dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f") {
+  cid
+  __typename
+}
+```
diff --git a/docs/website/references/query-specification/execution-flow.md b/docs/website/references/query-specification/execution-flow.md
new file mode 100644
index 0000000000..61d405469a
--- /dev/null
+++ b/docs/website/references/query-specification/execution-flow.md
@@ -0,0 +1,43 @@
+---
+sidebar_label: Execution Flow
+sidebar_position: 140
+---
+# Execution Flow
+
+Understanding the execution flow of a query can help understand its structure, and help you with your queries. Query execution is broken down into the following three phases:
+- Parsing
+- Planning
+- Executing
+
+## Parsing Phase
+
+The parsing phase parses the query as a string and returns a structured Abstract Syntax Tree (AST) representation. It also does a semantic validation of the structure against the schema.
+
+## Planning Phase
+
+The planning phase analyzes the query, the storage structure, and any additional indexes to determine query execution. This phase is highly dependent on the deployment environment and underlying storage engine as it uses available features and structure to provide optimal performance. Specific schemas automatically create certain secondary indexes. The planning phase automatically uses available custom secondary indexes created by you.
+ +## Execution Phase + +The execution phase does data scanning, filtering, and formatting. This phase has a deterministic process towards the steps taken to produce results. This is due to the priority an argument and its parameters have over another. + +The priority order of arguments is as follows: + +1. filter -> groupBy: Filtered Data +1. groupBy -> aggregate: Subgroups +1. aggregate -> having: Subgroups +1. having -> order: Filtered Data +1. order -> limit: Ordered Data + +Each step has a specific purpose as described here. + +1. `filter` argument breaks down the target collection (based on provided parameters and fields) into the output result set. +1. `groupBy` argument divides the result set further into subgroups across potentially several dimensions. +1. `aggregate` phase processes a subgroup's given fields. +1. `having` argument filters the data based on the grouped fields or aggregate results. +1. `order` argument structures the result set based on the ordering (ascending or descending) of one or more field values. +1. `limit` argument and its associated arguments restrict the number of the finalized, filtered, ordered result set. + +See the image below for an example of the execution order: + +![](https://i.imgur.com/Yf0KJ5A.png) \ No newline at end of file diff --git a/docs/website/references/query-specification/filtering.md b/docs/website/references/query-specification/filtering.md new file mode 100644 index 0000000000..e19dc94961 --- /dev/null +++ b/docs/website/references/query-specification/filtering.md @@ -0,0 +1,213 @@ +--- +sidebar_label: Filtering +sidebar_position: 50 +--- +# Filtering + +Filtering is used to screen data entries containing the specified fields and predicates (including compound predicates) out of a collection of documents using conditional keywords like `_and`, `_or`, `_not`. To accomplish this, the `filter` keyword can be applied as an argument to root level fields and subfields. 
An empty `filter` object is equivalent to no filters being applied. Hence, the output will return all books. The following example displays an empty filter being applied on the root level field.
+
+```graphql
+{
+  Books(filter: {}) {
+    title
+    genre
+    description
+  }
+}
+```
+
+Some filtering options depend on the available indexes on a field. However, we will not discuss them in this section.
+
+To apply a filter to a specific field, we can specify it within the filter object. The example below only returns books with the title “A Painted House”.
+
+```graphql
+{
+  Books(filter: { title: { _eq: "A Painted House" }}) {
+    title
+    genre
+    description
+  }
+}
+```
+
+We can apply filters to all or multiple fields available.
+
+**NOTE:** Each additional field listed in the filter object implies a conditional AND relation.
+
+```graphql
+{
+  Books(filter: { title: {_eq: "A Painted House"}, genre: {_eq: "Thriller" }}) {
+    title
+    genre
+    description
+  }
+}
+```
+
+The above query only returns books with the title “A Painted House” AND genre “Thriller”.
+
+Filters can also be applied on subfields that have relational objects within them. For example: an object Book, with an Author field, has a many-to-one relationship to the Author object. Then we can query and filter based on the value of the Author field.
+
+```graphql
+{
+  Books(filter: { genre: {_eq: "Thriller"}, author: {name: {_eq: "John Grisham"}}}) {
+    title
+    genre
+    description
+    Author {
+      name
+      bio
+    }
+  }
+}
+```
+
+This query returns all books authored by “John Grisham” with the genre “Thriller”.
+
+Filtering from the root object level, compared to the sub-object level results in different semantics. Root filters that apply to sub-objects (aka `author` section of the above query), only returns the root object type if both the root object and sub-object conditions are fulfilled. For example, if the author filter condition is satisfied, the above code snippet only returns books.
+ +This applies to both single sub-objects and array sub-objects, i.e., if we apply a filter on a sub-object array, the output **only** returns the root object, if at least one sub-object matches the given filter instead of requiring **every** sub-object to match the query. For example, the following query will only return authors, if they have **at least** one thriller genre based book. + +```graphql +{ + Authors(filter: {book: {genre: {_eq: "Thriller"}}}) { + name + bio + } +} +``` + +Additionally, in the selection set, if we include the sub-object array we are filtering on, the filter is then implicitly applied unless otherwise specified. + +In the query snippet above, let's add `books` to the selection set using the query below . +```graphql +{ + Authors(filter: {book: {genre: {_eq: "Thriller"}}}) { + name + bio + books { + title + genre + } + } +} +``` + +Here, the `books` section will only contain books that match the root object filter, namely, `{genre: {_eq: "Thriller"}}`. If we wish to return the same authors from the above query and include *all* their books, we can add an explicit filter directly to the sub-object instead of the sub-filters. + +```graphql +{ + Authors(filter: {book: {genre: {_eq: "Thriller"}}}) { + name + bio + books(filter: {}) { + title + genre + } + } +} +``` + +In the code snippet above, the output returns authors who have at least one book with the genre "Thriller". The output also returns **all** the books written by these selected authors (not just the thrillers). + +Filters applied solely to sub-objects, which are only applicable for array types, are computed independently from the root object filters. + +```graphql +{ + Authors(filter: {name: {_eq: "John Grisham"}}) { + name + bio + books(filter: { genre: {_eq: "Thriller" }}) { + title + genre + description + } + } +} +``` + +The above query returns all authors with the name “John Grisham”, then filters and returns all the returned authors' books with the genre “Thriller”. 
This is similar to the previous query, but an important distinction is that it will return all the matching author objects regardless of the book's sub-object filter.
+
+The first query will only return an output if there are any Thriller books written by the author “John Grisham” (using AND condition i.e., both conditions have to be fulfilled). The second query always returns all authors named “John Grisham”, and their Thriller genre books.
+
+So far, we have only seen examples of EXACT string matches, but we can also filter using scalar value type or object fields, e.g., booleans, integers, floating points, etc. Also, comparison operators like: Greater Than, Less Than, Greater Than or Equal To, Less Than or Equal To, Equal can be used.
+
+Let's query for all books with a rating greater than or equal to 4.
+
+```graphql
+{
+  Books(filter: { rating: { _gte: 4 } }) {
+    title
+    genre
+    description
+  }
+}
+```
+
+**NOTE:** In the above example, the expression contains a new scalar type object `{ _gte: 4 }`, while previously we had a simple string value. If a scalar type field has a filter with an object value, then that object's first and only key must be a comparison operator like `_gte`. If the filter is given a simple scalar value like “John Grisham”, “Thriller”, or FALSE, then the default operator that should be used is `_eq` (EQUAL). The following table displays a list of available operators:
+
+
+| Operator | Description |
+| -------- | -------- |
+| `_eq` | Equal to |
+| `_neq` | Not Equal to |
+| `_gt` | Greater Than |
+| `_gte` | Greater Than or Equal to |
+| `_lt` | Less Than |
+| `_lte` | Less Than or Equal to |
+| `_in` | In the List |
+| `_nin` | Not in the List |
+| `_like` | Like Sub-String |
+| `_nlike` | Unlike Sub-String |
+###### Table 1. Supported operators.
+
+The table below displays the operators that can be used for every value type:
+
+
+| Scalar Type | Operators |
+| -------- | -------- |
+| String | `_eq, _neq, _like, _in, _nin` |
+| Integer | `_eq, _neq, _gt, _gte, _lt, _lte, _in, _nin` |
+| Floating Point | `_eq, _neq, _gt, _gte, _lt, _lte, _in, _nin` |
+| Boolean | `_eq, _neq, _in, _nin` |
+| DateTime | `_eq, _neq, _gt, _gte, _lt, _lte, _in, _nin` |
+###### Table 2. Operators supported by Scalar types.
+
+There are 3 types of conditional keywords, i.e., `_and`, `_or`, and `_not`. Conditional keywords like `_and` and `_or` are used when we need to apply filters on multiple fields simultaneously. The `_not` conditional keyword only accepts an object.
+
+The code snippet below queries all books that are a part of the Thriller genre, or have a rating between 4 and 5.
+
+```graphql
+{
+  Books(
+    filter: {
+      _or: [
+        {genre: {_eq: "Thriller"}},
+        { _and: [
+          {rating: { _gte: 4 }},
+          {rating: { _lte: 5 }},
+        ]},
+      ]}
+  ) {
+    title
+    genre
+    description
+  }
+}
+```
+
+An important thing to note about the above query is the `_and` conditional. Even though AND is assumed, if we have two filters on the same field, we MUST specify the `_and` operator. This is because JSON objects cannot contain duplicate fields.
+
+>**Invalid**: 
+`filter: { rating: { _gte: 4 }, rating { _lte: 5 } }`
+>**Valid**: 
+`filter: { _and: [ {rating: {_gte: 4}}, {rating: {_lte: 5}} ]}`
+
+The `_not` conditional accepts an object instead of an array.
+ +> Filter all objects that *do not* have the genre "Thriller" +> `filter: { _not: { genre: { _eq: "Thriller" } } }` + +*The`_not` operator should only be used when the available filter operators like `_neq` do not fit the use case.* diff --git a/docs/website/references/query-specification/grouping.md b/docs/website/references/query-specification/grouping.md new file mode 100644 index 0000000000..babbc67b16 --- /dev/null +++ b/docs/website/references/query-specification/grouping.md @@ -0,0 +1,83 @@ +--- +sidebar_label: Grouping +sidebar_position: 110 +--- +# Grouping + +Grouping allows a collection of results from a query to be "grouped" into sections based on some field. These sections are called sub-groups, and are based on the equality of fields within objects, resulting in clusters of groups. Any object field may be used to group objects together. Additionally, multiple fields may be used in the group by clause to further segment the groups over multiple dimensions. + +Once one or more group by fields have been selected using the `groupBy` argument, which accepts an array of length one or more, you may only access certain fields in the return object. Only the indicated `groupBy` fields and aggregate function results may be included in the result object. If you wish to access the sub-groups of individual objects, a special return field called `_group` is available. This field matches the root query type, and can access any field in the object type. + +In the example below, we are querying for all the books whose author's name begins with 'John'. The results will then be grouped by genre, and will return the genre name and the sub-groups `title` and `rating`. +```graphql +{ + Books(filter: {author: {name: {_like: "John%"}}}, groupBy: [genre]) { + genre + _group { + title + rating + } + } +} +``` + +In the above example, we can see how the `groupBy` argument is provided and that it accepts an array of field names. 
We can also see how the special `_group` field can be used to access the sub-group elements. + +It's important to note that in the above example, the only available field from the root `Book` type is the `groupBy` field `genre`, along with the special group and aggregate proxy fields. + +#### Grouping on Multiple Fields +As mentioned, we can include any number of fields in the `groupBy` argument to segment the data further. Which can then also be accessed in the return object, as demonstrated in the example below: +```graphql +{ + Books(filter: {author: {name: {_like: "John%"}}}, groupBy: [genre, rating]) { + genre + rating + _group { + title + description + } + } +} +``` + +#### Grouping on Related Objects +Objects often have related objects within their type definition indicated by the `@relation` directive on the respective object. We can use the grouping system to split results over the related object and the root type fields. + +Like any other group query, we are limited in which fields we can access indicated by the `groupBy` argument's fields. If we include a subtype that has a `@relation` directive in the `groupBy` list, we can access the entire relations fields. + +Only "One-to-One" and "One-to-Many" relations can be used in a `groupBy` argument. + +Given a type definition defined as: +```graphql +type Book { + title: String + genre: String + rating: Float + author: Author @relation +} + +type Author { + name: String + written: [Book] @relation +} +``` + +We can create a group query over books and their authors, as demonstrated in the example below: +```graphql +{ + Books(groupBy: [author]) { + Author { + name + } + _group { + title + genre + rating + } + } +} +``` + +As you can see, we can access the entire `Author` object in the main return object without having to use any special proxy fields. + +Group operations can include any combination, single or multiple, individual field or related object, that a developer needs. 
diff --git a/docs/website/references/query-specification/limiting-and-pagination.md b/docs/website/references/query-specification/limiting-and-pagination.md new file mode 100644 index 0000000000..fd5577e2c3 --- /dev/null +++ b/docs/website/references/query-specification/limiting-and-pagination.md @@ -0,0 +1,33 @@ +--- +sidebar_label: Limiting and Pagination +sidebar_position: 70 +--- +# Limiting and Pagination + +After filtering and sorting a query, we can then limit and skip elements from the returned set of objects. + +Let us get the top 10 rated books: +```graphql +{ + Books(sort: {rating: DESC}, limit: 10) { + title + genre + description + } +} +``` + +The `limit` function accepts the maximum number of items to return from the resulting set. Next, we can `skip` elements in the set, to get the following N objects from the return set. Both these functions can be used to create a pagination system, where we have a limit on number of items per page, and can skip through pages as well. + +Let's get the *next* top 10 rated books after the previous query: +```graphql +{ + Books(sort: {rating: DESC}, limit:10, offset: 10) { + title + genre + description + } +} +``` + +Limits and offsets can be combined to create several different pagination methods. \ No newline at end of file diff --git a/docs/website/references/query-specification/mutation-block.md b/docs/website/references/query-specification/mutation-block.md new file mode 100644 index 0000000000..255c3b40b1 --- /dev/null +++ b/docs/website/references/query-specification/mutation-block.md @@ -0,0 +1,156 @@ +--- +sidebar_label: Mutation Block +sidebar_position: 150 +--- +# Mutation Block + +Mutations are the `write` side of the DefraDB Query Language. They rely on the query system to function properly. Updates, upserts and deletes, all require filtering and finding data before taking action. + +The data and payload format that mutations use is fundamental to maintaining the designed structure of the database. 
All mutation definitions are generated for each defined type in the Database. This is similar to the read query system. + +Mutations are similar to SQL `INSERT INTO ...` or `UPDATE` statements. Much like the Query system, all mutations exist inside a `mutation { ... }` block. Several mutations can be run at the same time, independently of one another. + +## Insert + +Insert is used to create new documents from scratch. This involves many necessary steps to ensure all the data is structured properly and verifiable. From a developer's perspective, it's the easiest of all the mutations as it doesn't require any queries or document lookups before execution. + +```graphql +type Book { ... } + +mutation { + create_Book(input: createBookInput) [Book] +} +``` + +The above example displays the general structure of an insert mutation. You call the `create_TYPE` mutation, with the given input. + +### Input Object Type + +All mutations use a typed input object to update the data. + +The following is an example with a full type and input object: + +```graphql +type Book { + title: String + description: String + rating: Float +} + +mutation { + create_Book(input: { + title: "Painted House", + description: "The story begins as Luke Chandler ...", + rating: 4.9 + }) { + title + description + rating + } +} +``` + +The above is a simple example of creating a Book using an insert mutation. Additionally, we can see that much like the Query functions, we can select the fields we want to be returned here. + +The generated insert mutation returns the same type it creates, in this case, a Book type. So we can easily include all the fields as a selection set so that we can return them. + +More specifically, the return type is of type `[Book]`. So we can create and return multiple books at once. + +## Update + +Updates are distinct from Inserts in several ways. Firstly, it relies on a query to select the correct document or documents to update. Secondly, it uses a different payload system. 
+ +Update filters use the same format and types from the Query system. Hence, it is easily transferable. + +The structure of the generated update mutation for a `Book` type is given below: +```graphql +mutation { + update_Book(dockey: ID, filter: BookFilterArg, input: updateBookInput) [Book] +} +``` + +See the structure and syntax of the filter query above. You can also see an additional field `dockey`, which will supersede the `filter`; this makes it easy to update a single document by a given ID. + +The input object type is the same for both `update_TYPE` and `create_TYPE` mutations. + +Here's an example. +```json +{ + name: "John", + rating: nil +} +``` + +This update sets the `name` field to "John" and deletes the `rating` field value. + +Once we create our update, and select which document(s) to update, we can query the new state of all documents affected by the mutation. This is because our update mutation returns the type it mutates. + +A basic example is provided below: +```graphql +mutation { + update_Book(dockey: "123", input: {name: "John"}) { + _key + name + } +} + +``` + +Here, we can see that after applying the mutation, we return the `_key` and `name` fields. We can return any field from the document (not just the updated ones). We can even return and filter on related types. + +Beyond updating by an ID or IDs, we can use a query filter to select which documents to apply our update to. This filter works the same as the queries. + +```graphql +mutation { + update_Book(filter: {rating: {_le: 1.0}}, input: {rating: 1.5}) { + _key + rating + name + } +} +``` + +Here, we select all documents with a rating less than or equal to 1.0, update the rating value to 1.5, and return all the affected documents' `_key`, `rating`, and `name` fields. + +For additional filter details, see the above `Query Block` section. + + +## Delete + +Delete mutations allow developers and users to remove objects from collections. 
You can delete using specific Document Keys, a list of doc keys, or a filter statement. + +The document selection interface is identical to the `Update` system. Much like the update system, we can return the fields of the deleted documents. + +The structure of the generated delete mutation for a `Book` type is given below: +```graphql +mutation { + delete_Book(dockey: ID, ids: [ID], filter: BookFilterArg) [Book] +} +``` + +Here, we can delete a document with ID "123": +```graphql +mutation { + delete_User(dockey: "123") { + _key + name + } +} +``` + +This will delete the specific document, and return the `_key` and `name` for the deleted document. + +DefraDB currently uses a Hard Delete system, which means that when a document is deleted, it is completely removed from the database. + +Similar to the Update system, you can use a filter to select which documents to delete, as shown below: + +```graphql +mutation { + delete_User(filter: {rating: {_gt: 3}}) { + _key + name + } +} +``` + +Here, we are deleting all the matching documents (documents with a rating greater than 3). diff --git a/docs/website/references/query-specification/query-block.md b/docs/website/references/query-specification/query-block.md new file mode 100644 index 0000000000..81ff227553 --- /dev/null +++ b/docs/website/references/query-specification/query-block.md @@ -0,0 +1,9 @@ +--- +sidebar_label: Query Block +sidebar_position: 30 +--- +# Query Block + +Query blocks are read-only GraphQL operations designed only to request information from the database, without the ability to mutate the database state. They contain multiple subqueries which are executed concurrently, unless there is some variable dependency between them. + +Queries support database query operations such as filtering, sorting, grouping, skipping/limiting, aggregation, etc. These query operations can be used on different GraphQL object levels, mostly on fields that have some relation or embedding to other objects. 
\ No newline at end of file diff --git a/docs/website/references/query-specification/query-language-overview.md b/docs/website/references/query-specification/query-language-overview.md new file mode 100644 index 0000000000..17b54aae55 --- /dev/null +++ b/docs/website/references/query-specification/query-language-overview.md @@ -0,0 +1,19 @@ +--- +sidebar_label: Query Language Overview +sidebar_position: 10 +--- +# Query Language Overview + +The DefraDB query language (DQL) is a GraphQL-defined API which is used to access and query data residing inside a DefraDB node. + +[GraphQL](https://graphql.org) is an open-source query language for APIs, built for making APIs fast, flexible, and developer friendly. Databases such as [DGraph](https://dgraph.io/) and [Fauna](https://fauna.com) use GraphQL API as a query language to read and write data to/from the database. +- DGraph is a distributed, high throughput graph database. +- Fauna is a transactional database delivered as a secure, web-native GraphQL API. + +DefraDB (while using GraphQL) is designed as a document storage database, unlike DGraph and Fauna. DQL exposes every functionality of the database directly, without the need for any additional APIs. The functionalities include: +- Reading, writing, and modifying data. +- Describing data structures, schemas, and architecting data models (via indexes and other schema-independent, application-specific requirements). + +**Exception**: DefraDB's PeerAPI is used to interact with other databases and with the underlying CRDTs (for collaborative text editing). + +Our initial design relies only on the currently available GraphQL specification (version tagged June 2018 edition). Initially, the GraphQL Query Language will utilize standard GraphQL Schemas, with any additional directives exposed by DefraDB. DefraDB's CRDT types will initially be automatically mapped to GraphQL types. 
\ No newline at end of file diff --git a/docs/website/references/query-specification/relationships.md b/docs/website/references/query-specification/relationships.md new file mode 100644 index 0000000000..bfecbf329e --- /dev/null +++ b/docs/website/references/query-specification/relationships.md @@ -0,0 +1,89 @@ +--- +sidebar_label: Relationships +sidebar_position: 80 +--- +# Relationships + +DefraDB supports a number of common relational models that an application may need. These relations are expressed through the Document Model, which has a few differences from the standard SQL model. There are no manually created `Join Tables` which track relationships. Instead, the non-normative nature of NoSQL Document objects allows us to embed and resolve relationships as needed automatically. + +Relationships are defined through the Document Schemas, using a series of GraphQL directives, and inferencing. They are always defined on both sides of the relation, meaning both objects involved in the relationship. + +#### One-to-One +The simplest relationship is a "one-to-one" which directly maps one document to another. The code below defines a one-to-one relationship between the `Author` and their `Address`: + +```graphql +type Author { + name: String + address: Address @primary +} + +type Address { + number: Integer + streetName: String + city: String + postal: String + author: Author +} +``` + +The types of both objects are included and DefraDB infers the relationship. As a result: +- Both objects which can be queried separately. +- Each object provides field level access to its related object. + +The notable distinction of "one-to-one" relationships is that only the DocKey of the corresponding object is stored. + +On the other hand, if you simply embed the Address within the Author type without the internal relational system, you can include the `@embed` directive, which will embed it within. 
Objects embedded inside another using the `@embed` directive do not expose a query endpoint, so they can *only* be accessed through their parent object. Additionally they are not assigned a DocKey. + +#### One-to-Many +A "one-to-many" relationship allows us to relate several objects of one type, to a single instance of another. + +Let us define a one-to-many relationship between an author and their books below. This example differs from the above relationship example because we relate the author to an array of books, instead of a single address. + +```graphql +type Author { + name: String + books: [Book] +} + +type Book { + title: String + genre: String + description: String + author: Author +} +``` + +In this case, the books object is defined within the Author object to be an array of books, indicating that *one* Author type has a relationship to *many* Book types. Internally, much like the one-to-one model, only the DocKeys are stored. However, the DocKey is only stored on one side of the relationship (the child type). In this example, only the Book type keeps a reference to its associated Author DocKey. + +#### Many-to-Many + +*to be updated* + +#### Multiple Relationships + +It is possible to define a collection of different relationship models. Additionally, we can define multiple relationships within a single type. Relationships containing unique types, can simply be added to the types without issue. Like the following: +```graphql +type Author { + name: String + address: Address + books: [Book] @relation("authored_books") @index +} +``` + +However, in case of multiple relationships using the *same* types, you have to annotate the differences. You can use the `@relation` directive to be explicit. 
+```graphql +type Author { + name: String + written: [Book] @relation(name: "written_books") + reviewed: [Book] @relation(name: "reviewed_books") +} + +type Book { + title: String + genre: String + author: Author @relation(name: "written_books") + reviewedBy: Author @relation(name: "reviewed_books") +} +``` + +Here we have two relations of the same type. By default, their association would conflict because internally, type names are used to specify relations. We use the `@relation` to add a custom name to the relation. `@relation` can be added to any relationship, even if it's a duplicate type relationship. It exists to be explicit, and to change the default parameters of the relation. \ No newline at end of file diff --git a/docs/website/references/query-specification/sorting-and-ordering.md b/docs/website/references/query-specification/sorting-and-ordering.md new file mode 100644 index 0000000000..444a244905 --- /dev/null +++ b/docs/website/references/query-specification/sorting-and-ordering.md @@ -0,0 +1,136 @@ +--- +sidebar_label: Sorting and Ordering +sidebar_position: 60 +--- +# Sorting and Ordering + +Sorting is an integral part of any Database and Query Language. The sorting syntax is similar to filter syntax, in that we use objects, and sub-objects to indicate sorting behavior, instead of filter behavior. + +The query to find all books ordered by their latest published date: +```graphql +{ + Books(order: { published_at: DESC}) { + title + description + published_at + } +} +``` +The syntax indicates: +- The field we wanted to sort on `published_at` +- The direction we wanted to order by `descending`. + +Sorting can be applied to multiple fields in the same query. The sort order is same as the field order in the sorted object. + +The query below finds all books ordered by earliest published date and then by descending order of titles. 
+```graphql +{ + Books(order: { published_at: ASC, title: DESC }) { + title + genre + description + } +} +``` + +Additionally, you can sort sub-object fields along with root object fields. + +The query below finds all books ordered by earliest published date and then by the latest authors' birthday. +```graphql +{ + Books(order: { published_at: ASC, Author: { birthday: DESC }}) { + title + description + published_at + Author { + name + birthday + } + } +} +``` + +Sorting multiple fields simultaneously is primarily driven by the first indicated sort field (primary field). In the query above, it is the “published_at” date. The following sort field (aka, secondary field), is used in the case that more than one record has the same value for the primary sort field. + +Assuming there are more than two sort fields, in that case, the same behavior applies, except the primary, secondary pair shifts by one element. Hence, the 2nd field is the primary, and the 3rd is the secondary, until we reach the end of the sort fields. + +In case of a single sort field and objects with same value, the documents identifier (DocKey) is used as the secondary sort field by default. This is applicable regardless of the number of sort fields. As long as the DocKey is not already included in sort fields, it acts as the final tie-breaking secondary field. + +If the DocKey is included in the sort fields, any field included afterwards will never be evaluated. This is because all DocKeys are unique. If the sort fields are `published_at`, `id`, and `birthday`, the `birthday` sort field will never be evaluated and should be removed from the list. + +> Sorting on sub-objects from the root object is only allowed if the sub-object is not an array. If it is an array, the sort must be applied to the object field directly instead of through the root object. 
+ +*So, instead of:* +```graphql +{ + Authors(order: { name: DESC, Books: { title: ASC }}) { + name + Books { + title + } + } +} +``` +*We need:* +```graphql +{ + Authors(order: { name: DESC }) { + name + Books(order: { title: ASC }) { + title + } + } +} +``` + +>Root level filters and order only apply to root object. If you allow the initial version of the query, it would be confusing if the ordering applied to the order of the root object compared to its sibling objects or if the ordering applied solely to the sub-object. + +>If you allow it, it enforces the semantics of root level sorting on array sub-objects to act as a sorting mechanism for the root object. As a result, there is no obvious way to determine which value in the array is used for the root order. + +If you have the following objects in the database: +```json + [ + "Author" { + "name": "John Grisham", + "books": [ + { "title": "A Painted House" }, + { "title": "The Guardians" } + ] + }, + "Author" { + "name": "John Grisham", + "books": [ + { "title": "Camino Winds" }, + ] + }, + "Author" { + "name": "John LeCare", + "books": [ + { "title": "Tinker, Tailor, Soldier, Spy"} + ] + } + ] +``` +> and the following query +```graphql +{ + Authors(order: { name: DESC, books: { title: ASC }}) { + name + books { + title + } + } +} +``` + +```graphql +Books(filter: {_id: [1]}) { + title + genre + description +} +``` + +> Given there are two authors with the same name (John Grisham), the sort object `(sort: { name: "desc", Books: { title: "asc" }}` would suggest we sort duplicate authors using `Books: { title: "asc" }` as the secondary sort field. However, because the books field is an array of objects, there is no single value for the title to compare easily. +> +> Therefore, sorting on array sub objects from the root field is ***strictly not allowed***. 
diff --git a/docs/website/release notes/_category_.json b/docs/website/release notes/_category_.json new file mode 100644 index 0000000000..40addbd58f --- /dev/null +++ b/docs/website/release notes/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Release Notes", + "position": 4 + } + \ No newline at end of file diff --git a/docs/website/release notes/v0.10.0.md b/docs/website/release notes/v0.10.0.md new file mode 100644 index 0000000000..c5471bcf9c --- /dev/null +++ b/docs/website/release notes/v0.10.0.md @@ -0,0 +1,45 @@ +--- +sidebar_position: 100 +--- +# v0.10.0 + +> 2024-03-08 + +## Changelog +DefraDB v0.10 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.9.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. + +### Features +* feat: Add JSON scalar ([#2254](https://github.com/sourcenetwork/defradb/issues/2254)) +* feat: Add case insensitive `like` operator ([#2368](https://github.com/sourcenetwork/defradb/issues/2368)) +* feat: Add composite indexes ([#2226](https://github.com/sourcenetwork/defradb/issues/2226)) +* feat: Add support for views with Lens transforms ([#2311](https://github.com/sourcenetwork/defradb/issues/2311)) +* feat: Allow setting null values on doc fields ([#2273](https://github.com/sourcenetwork/defradb/issues/2273)) +* feat: Generate OpenAPI command ([#2235](https://github.com/sourcenetwork/defradb/issues/2235)) +* feat: Model Col. 
SchemaVersions and migrations on Cols ([#2286](https://github.com/sourcenetwork/defradb/issues/2286)) +* feat: Multiple docs with nil value on unique-indexed field ([#2276](https://github.com/sourcenetwork/defradb/issues/2276)) +* feat: Replace FieldDescription.RelationType with IsPrimary ([#2288](https://github.com/sourcenetwork/defradb/issues/2288)) +* feat: Reverted order for indexed fields ([#2335](https://github.com/sourcenetwork/defradb/issues/2335)) +* feat: Rework GetCollection/SchemaByFoo funcs into single ([#2319](https://github.com/sourcenetwork/defradb/issues/2319)) +### Fix +* fix: Add `latest` image tag for ghcr ([#2340](https://github.com/sourcenetwork/defradb/issues/2340)) +* fix: Add missing delta payload ([#2306](https://github.com/sourcenetwork/defradb/issues/2306)) +* fix: Add missing directive definitions ([#2369](https://github.com/sourcenetwork/defradb/issues/2369)) +* fix: Add validation to JSON fields ([#2375](https://github.com/sourcenetwork/defradb/issues/2375)) +* fix: Fix compound relational filters in aggregates ([#2297](https://github.com/sourcenetwork/defradb/issues/2297)) +* fix: Load root dir before loading config ([#2266](https://github.com/sourcenetwork/defradb/issues/2266)) +* fix: Make peers sync secondary index ([#2390](https://github.com/sourcenetwork/defradb/issues/2390)) +* fix: Make returned collections respect explicit transactions ([#2385](https://github.com/sourcenetwork/defradb/issues/2385)) +* fix: Mark docs as deleted when querying in delete mut ([#2298](https://github.com/sourcenetwork/defradb/issues/2298)) +* fix: Move field id off of schema ([#2336](https://github.com/sourcenetwork/defradb/issues/2336)) +* fix: Update GetCollections behaviour ([#2378](https://github.com/sourcenetwork/defradb/issues/2378)) +### Refactoring +* refactor: Decouple net config ([#2258](https://github.com/sourcenetwork/defradb/issues/2258)) +* refactor: Generate field ids using a sequence 
([#2339](https://github.com/sourcenetwork/defradb/issues/2339)) +* refactor: HTTP config ([#2278](https://github.com/sourcenetwork/defradb/issues/2278)) +* refactor: Make CollectionDescription.Name Option ([#2223](https://github.com/sourcenetwork/defradb/issues/2223)) +* refactor: Make config internal to CLI ([#2310](https://github.com/sourcenetwork/defradb/issues/2310)) +* refactor: Node config ([#2296](https://github.com/sourcenetwork/defradb/issues/2296)) +* refactor: Remove unused Delete field from client.Document ([#2275](https://github.com/sourcenetwork/defradb/issues/2275)) + diff --git a/docs/website/release notes/v0.11.0.md b/docs/website/release notes/v0.11.0.md new file mode 100644 index 0000000000..816e487065 --- /dev/null +++ b/docs/website/release notes/v0.11.0.md @@ -0,0 +1,41 @@ +--- +sidebar_position: 110 +--- +# v0.11.0 + +> 2024-05-06 + +## Changelog +DefraDB v0.11 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.10.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. 
+ +### Features +* feat: Add Access Control Policy ([#2338](https://github.com/sourcenetwork/defradb/issues/2338)) +* feat: Add Defra-Lens support for branching schema ([#2421](https://github.com/sourcenetwork/defradb/issues/2421)) +* feat: Add P Counter CRDT ([#2482](https://github.com/sourcenetwork/defradb/issues/2482)) +* feat: Add PatchCollection ([#2402](https://github.com/sourcenetwork/defradb/issues/2402)) +* feat: Allow mutation of col sources via PatchCollection ([#2424](https://github.com/sourcenetwork/defradb/issues/2424)) +* feat: Force explicit primary decl. in SDL for one-ones ([#2462](https://github.com/sourcenetwork/defradb/issues/2462)) +* feat: Lens runtime config ([#2497](https://github.com/sourcenetwork/defradb/issues/2497)) +* feat: Move relation field properties onto collection ([#2529](https://github.com/sourcenetwork/defradb/issues/2529)) +* feat: Update corelog to 0.0.7 ([#2547](https://github.com/sourcenetwork/defradb/issues/2547)) +### Fix +* fix: Add check to filter result for logical ops ([#2573](https://github.com/sourcenetwork/defradb/issues/2573)) +* fix: Allow update when updating non-indexed field ([#2511](https://github.com/sourcenetwork/defradb/issues/2511)) +* fix: Handle compound filters on related indexed fields ([#2575](https://github.com/sourcenetwork/defradb/issues/2575)) +* fix: Make all array kinds nillable ([#2534](https://github.com/sourcenetwork/defradb/issues/2534)) +* fix: Return correct results from one-many indexed filter ([#2579](https://github.com/sourcenetwork/defradb/issues/2579)) +### Documentation +* docs: Add data definition document ([#2544](https://github.com/sourcenetwork/defradb/issues/2544)) +### Refactoring +* refactor: Add NormalValue ([#2404](https://github.com/sourcenetwork/defradb/issues/2404)) +* refactor: Clean up client/request package ([#2443](https://github.com/sourcenetwork/defradb/issues/2443)) +* refactor: DB transactions context 
([#2513](https://github.com/sourcenetwork/defradb/issues/2513)) +* refactor: Merge collection UpdateWith and DeleteWith ([#2531](https://github.com/sourcenetwork/defradb/issues/2531)) +* refactor: Replace logging package with corelog ([#2406](https://github.com/sourcenetwork/defradb/issues/2406)) +* refactor: Rewrite convertImmutable ([#2445](https://github.com/sourcenetwork/defradb/issues/2445)) +* refactor: Unify Field Kind and Schema properties ([#2414](https://github.com/sourcenetwork/defradb/issues/2414)) +### Testing +* test: Add flag to skip network tests ([#2495](https://github.com/sourcenetwork/defradb/issues/2495)) + diff --git a/docs/website/release notes/v0.2.0.md b/docs/website/release notes/v0.2.0.md new file mode 100644 index 0000000000..ae66bf9c07 --- /dev/null +++ b/docs/website/release notes/v0.2.0.md @@ -0,0 +1,86 @@ +--- +sidebar_position: 20 +--- + +# v0.2.0 + +> 2022-02-07 + +DefraDB v0.2 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +This release is jam-packed with new features and a small number of breaking changes. Read the full changelog for a detailed description. Most notable features include a new Peer-to-Peer (P2P) data synchronization system, an expanded query system to support GroupBy & Aggregate operations, and lastly TimeTraveling queries allowing to query previous states of a document. + +Much more than just that has been added to ensure we're building reliable software expected of any database, such as expanded test & benchmark suites, automated bug detection, performance gains, and more. + +This release does include a Breaking Change to existing v0.1 databases regarding the internal data model, which affects the "Content Identifiers" we use to generate DocKeys and VersionIDs. 
If you need help migrating an existing deployment, reach out at hello@source.network or join our Discord at https://discord.source.network. + +### Features + +* Added Peer-to-Peer networking data synchronization ([#177](https://github.com/sourcenetwork/defradb/issues/177)) +* TimeTraveling (History Traversing) query engine and doc fetcher ([#59](https://github.com/sourcenetwork/defradb/issues/59)) +* Add Document Deletion with a Key ([#150](https://github.com/sourcenetwork/defradb/issues/150)) +* Add support for sum aggregate ([#121](https://github.com/sourcenetwork/defradb/issues/121)) +* Add support for lwwr scalar arrays (full replace on update) ([#115](https://github.com/sourcenetwork/defradb/issues/115)) +* Add count aggregate support ([#102](https://github.com/sourcenetwork/defradb/issues/102)) +* Add support for named relationships ([#108](https://github.com/sourcenetwork/defradb/issues/108)) +* Add multi doc key lookup support ([#76](https://github.com/sourcenetwork/defradb/issues/76)) +* Add basic group by functionality ([#43](https://github.com/sourcenetwork/defradb/issues/43)) +* Update datastore packages to allow use of context ([#48](https://github.com/sourcenetwork/defradb/issues/48)) + +### Bug fixes + +* Only add join if aggregating child object collection ([#188](https://github.com/sourcenetwork/defradb/issues/188)) +* Handle errors generated during input object thunks ([#123](https://github.com/sourcenetwork/defradb/issues/123)) +* Remove new types from in-memory cache on generate error ([#122](https://github.com/sourcenetwork/defradb/issues/122)) +* Support relationships where both fields have the same name ([#109](https://github.com/sourcenetwork/defradb/issues/109)) +* Handle errors generated in fields thunk ([#66](https://github.com/sourcenetwork/defradb/issues/66)) +* Ensure OperationDefinition case has at least one selection([#24](https://github.com/sourcenetwork/defradb/pull/24)) +* Close datastore iterator on scan close 
([#56](https://github.com/sourcenetwork/defradb/pull/56)) (resulted in a panic when using limit) +* Close superseded iterators before orphaning ([#56](https://github.com/sourcenetwork/defradb/pull/56)) (fixes a panic in the join code) +* Move discard to after error check ([#88](https://github.com/sourcenetwork/defradb/pull/88)) (did result in panic if transaction creation fails) +* Check for nil iterator before closing document fetcher ([#108](https://github.com/sourcenetwork/defradb/pull/108)) + +### Tooling +* Added benchmark suite ([#160](https://github.com/sourcenetwork/defradb/issues/160)) + +### Documentation + +* Correcting comment typos ([#142](https://github.com/sourcenetwork/defradb/issues/142)) +* Correcting README typos ([#140](https://github.com/sourcenetwork/defradb/issues/140)) + +### Testing + +* Add transaction integration tests ([#175](https://github.com/sourcenetwork/defradb/issues/175)) +* Allow running of tests using badger-file as well as IM options ([#128](https://github.com/sourcenetwork/defradb/issues/128)) +* Add test datastore selection support ([#88](https://github.com/sourcenetwork/defradb/issues/88)) + +### Refactoring + +* Datatype modification protection ([#138](https://github.com/sourcenetwork/defradb/issues/138)) +* Cleanup Linter Complaints and Setup Makefile ([#63](https://github.com/sourcenetwork/defradb/issues/63)) +* Rework document rendering to avoid data duplication and mutation ([#68](https://github.com/sourcenetwork/defradb/issues/68)) +* Remove dependency on concrete datastore implementations from db package ([#51](https://github.com/sourcenetwork/defradb/issues/51)) +* Remove all `errors.Wrap` and update them with `fmt.Errorf`. 
([#41](https://github.com/sourcenetwork/defradb/issues/41)) +* Restructure integration tests to provide better visibility ([#15](https://github.com/sourcenetwork/defradb/pull/15)) +* Remove schemaless code branches ([#23](https://github.com/sourcenetwork/defradb/pull/23)) + +### Performance +* Add badger multi scan support ([#85](https://github.com/sourcenetwork/defradb/pull/85)) +* Add support for range spans ([#86](https://github.com/sourcenetwork/defradb/pull/86)) + +### Continous integration + +* Use more accurate test coverage. ([#134](https://github.com/sourcenetwork/defradb/issues/134)) +* Disable Codecov's Patch Check +* Make codcov less strict for now to unblock development ([#125](https://github.com/sourcenetwork/defradb/issues/125)) +* Add codecov config file. ([#118](https://github.com/sourcenetwork/defradb/issues/118)) +* Add workflow that runs a job on AWS EC2 instance. ([#110](https://github.com/sourcenetwork/defradb/issues/110)) +* Add Code Test Coverage with CodeCov ([#116](https://github.com/sourcenetwork/defradb/issues/116)) +* Integrate GitHub Action for golangci-lint Annotations ([#106](https://github.com/sourcenetwork/defradb/issues/106)) +* Add Linter Check to CircleCi ([#92](https://github.com/sourcenetwork/defradb/issues/92)) + +### Chore + +* Remove the S1038 rule of the gosimple linter. 
([#129](https://github.com/sourcenetwork/defradb/issues/129)) +* Update to badger v3, and use badger as default in memory store ([#56](https://github.com/sourcenetwork/defradb/issues/56)) +* Make Cid versions consistent ([#57](https://github.com/sourcenetwork/defradb/issues/57)) \ No newline at end of file diff --git a/docs/website/release notes/v0.2.1.md b/docs/website/release notes/v0.2.1.md new file mode 100644 index 0000000000..e72ea06d8c --- /dev/null +++ b/docs/website/release notes/v0.2.1.md @@ -0,0 +1,57 @@ +--- +sidebar_position: 21 +--- + +# v0.2.1 + +> 2022-03-04 + +### Features + +* Add ability to delete multiple documents using filter ([#206](https://github.com/sourcenetwork/defradb/issues/206)) +* Add ability to delete multiple documents, using multiple ids ([#196](https://github.com/sourcenetwork/defradb/issues/196)) + +### Fixes + +* Concurrency control of Document using RWMutex ([#213](https://github.com/sourcenetwork/defradb/issues/213)) +* Only log errors and above when benchmarking ([#261](https://github.com/sourcenetwork/defradb/issues/261)) +* Handle proper type conversion on sort nodes ([#228](https://github.com/sourcenetwork/defradb/issues/228)) +* Return empty array if no values found ([#223](https://github.com/sourcenetwork/defradb/issues/223)) +* Close fetcher on error ([#210](https://github.com/sourcenetwork/defradb/issues/210)) +* Installing binary using defradb name ([#190](https://github.com/sourcenetwork/defradb/issues/190)) + +### Tooling + +* Add short benchmark runner option ([#263](https://github.com/sourcenetwork/defradb/issues/263)) + +### Documentation + +* Add data format changes documentation folder ([#89](https://github.com/sourcenetwork/defradb/issues/89)) +* Correcting typos ([#143](https://github.com/sourcenetwork/defradb/issues/143)) +* Update generated CLI docs ([#208](https://github.com/sourcenetwork/defradb/issues/208)) +* Updated readme with P2P section ([#220](https://github.com/sourcenetwork/defradb/issues/220)) 
+* Update old or missing license headers ([#205](https://github.com/sourcenetwork/defradb/issues/205)) +* Update git-chglog config and template ([#195](https://github.com/sourcenetwork/defradb/issues/195)) + +### Refactoring + +* Introduction of logging system ([#67](https://github.com/sourcenetwork/defradb/issues/67)) +* Restructure db/txn/multistore structures ([#199](https://github.com/sourcenetwork/defradb/issues/199)) +* Initialize database in constructor ([#211](https://github.com/sourcenetwork/defradb/issues/211)) +* Purge all println and ban it ([#253](https://github.com/sourcenetwork/defradb/issues/253)) + +### Testing + +* Detect and force breaking filesystem changes to be documented ([#89](https://github.com/sourcenetwork/defradb/issues/89)) +* Boost collection test coverage ([#183](https://github.com/sourcenetwork/defradb/issues/183)) + +### Continuous integration + +* Combine the Lint and Benchmark workflows so that the benchmark job depends on the lint job in one workflow ([#209](https://github.com/sourcenetwork/defradb/issues/209)) +* Add rule to only run benchmark if other check are successful ([#194](https://github.com/sourcenetwork/defradb/issues/194)) +* Increase linter timeout ([#230](https://github.com/sourcenetwork/defradb/issues/230)) + +### Chore + +* Remove commented out code ([#238](https://github.com/sourcenetwork/defradb/issues/238)) +* Remove dead code from multi node ([#186](https://github.com/sourcenetwork/defradb/issues/186)) \ No newline at end of file diff --git a/docs/website/release notes/v0.3.0.md b/docs/website/release notes/v0.3.0.md new file mode 100644 index 0000000000..01002c2183 --- /dev/null +++ b/docs/website/release notes/v0.3.0.md @@ -0,0 +1,178 @@ +--- +sidebar_position: 30 +--- + +# v0.3.0 + +> 2022-08-02 + +DefraDB v0.3 is a major pre-production release. 
Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are *several* new features in this release, and we invite you to review the official changelog below. Some highlights are various new features for Grouping & Aggregation for the query system, like top-level aggregation and group filtering. Moreover, a brand new Query Explain system was added to introspect the execution plans created by DefraDB. Lastly, we introduced a revamped CLI configuration system. + +This release does include a Breaking Change to existing v0.2.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. + +### Features + +* Add named config overrides ([#659](https://github.com/sourcenetwork/defradb/issues/659)) +* Expose color and caller log options, add validation ([#652](https://github.com/sourcenetwork/defradb/issues/652)) +* Add ability to explain `groupNode` and its attribute(s). ([#641](https://github.com/sourcenetwork/defradb/issues/641)) +* Add primary directive for schema definitions ([@primary](https://github.com/primary)) ([#650](https://github.com/sourcenetwork/defradb/issues/650)) +* Add support for aggregate filters on inline arrays ([#622](https://github.com/sourcenetwork/defradb/issues/622)) +* Add explainable renderLimitNode & hardLimitNode attributes. ([#614](https://github.com/sourcenetwork/defradb/issues/614)) +* Add support for top level aggregates ([#594](https://github.com/sourcenetwork/defradb/issues/594)) +* Update `countNode` explanation to be consistent. 
([#600](https://github.com/sourcenetwork/defradb/issues/600)) +* Add support for stdin as input in CLI ([#608](https://github.com/sourcenetwork/defradb/issues/608)) +* Explain `cid` & `field` attributes for `dagScanNode` ([#598](https://github.com/sourcenetwork/defradb/issues/598)) +* Add ability to explain `dagScanNode` attribute(s). ([#560](https://github.com/sourcenetwork/defradb/issues/560)) +* Add the ability to send user feedback to the console even when logging to file. ([#568](https://github.com/sourcenetwork/defradb/issues/568)) +* Add ability to explain `sortNode` attribute(s). ([#558](https://github.com/sourcenetwork/defradb/issues/558)) +* Add ability to explain `sumNode` attribute(s). ([#559](https://github.com/sourcenetwork/defradb/issues/559)) +* Introduce top-level config package ([#389](https://github.com/sourcenetwork/defradb/issues/389)) +* Add ability to explain `updateNode` attributes. ([#514](https://github.com/sourcenetwork/defradb/issues/514)) +* Add `typeIndexJoin` explainable attributes. ([#499](https://github.com/sourcenetwork/defradb/issues/499)) +* Add support to explain `countNode` attributes. ([#504](https://github.com/sourcenetwork/defradb/issues/504)) +* Add CORS capability to HTTP API ([#467](https://github.com/sourcenetwork/defradb/issues/467)) +* Add explanation of spans for `scanNode`. ([#492](https://github.com/sourcenetwork/defradb/issues/492)) +* Add ability to Explain the response plan. ([#385](https://github.com/sourcenetwork/defradb/issues/385)) +* Add aggregate filter support for groups only ([#426](https://github.com/sourcenetwork/defradb/issues/426)) +* Configurable caller option in logger ([#416](https://github.com/sourcenetwork/defradb/issues/416)) +* Add Average aggregate support ([#383](https://github.com/sourcenetwork/defradb/issues/383)) +* Allow summation of aggregates ([#341](https://github.com/sourcenetwork/defradb/issues/341)) +* Add ability to check DefraDB CLI version. 
([#339](https://github.com/sourcenetwork/defradb/issues/339)) + +### Fixes + +* Add a check to ensure limit is not 0 when evaluating query limit and offset ([#706](https://github.com/sourcenetwork/defradb/issues/706)) +* Support multiple `--logger` flags ([#704](https://github.com/sourcenetwork/defradb/issues/704)) +* Return without an error if relation is finalized ([#698](https://github.com/sourcenetwork/defradb/issues/698)) +* Logger not correctly applying named config ([#696](https://github.com/sourcenetwork/defradb/issues/696)) +* Add content-type media type parsing ([#678](https://github.com/sourcenetwork/defradb/issues/678)) +* Remove portSyncLock deadlock condition ([#671](https://github.com/sourcenetwork/defradb/issues/671)) +* Silence cobra default errors and usage printing ([#668](https://github.com/sourcenetwork/defradb/issues/668)) +* Add stdout validation when setting logging output path ([#666](https://github.com/sourcenetwork/defradb/issues/666)) +* Consider `--logoutput` CLI flag properly ([#645](https://github.com/sourcenetwork/defradb/issues/645)) +* Handle errors and responses in CLI `client` commands ([#579](https://github.com/sourcenetwork/defradb/issues/579)) +* Rename aggregate gql types ([#638](https://github.com/sourcenetwork/defradb/issues/638)) +* Error when attempting to insert value into relationship field ([#632](https://github.com/sourcenetwork/defradb/issues/632)) +* Allow adding of new schema to database ([#635](https://github.com/sourcenetwork/defradb/issues/635)) +* Correctly parse dockey in broadcast log event. ([#631](https://github.com/sourcenetwork/defradb/issues/631)) +* Increase system's open files limit in integration tests ([#627](https://github.com/sourcenetwork/defradb/issues/627)) +* Avoid populating `order.ordering` with empties. 
([#618](https://github.com/sourcenetwork/defradb/issues/618)) +* Change to supporting of non-null inline arrays ([#609](https://github.com/sourcenetwork/defradb/issues/609)) +* Assert fields exist in collection before saving to them ([#604](https://github.com/sourcenetwork/defradb/issues/604)) +* CLI `init` command to reinitialize only config file ([#603](https://github.com/sourcenetwork/defradb/issues/603)) +* Add config and registry clearing to TestLogWritesMessagesToFeedbackLog ([#596](https://github.com/sourcenetwork/defradb/issues/596)) +* Change `$eq` to `_eq` in the failing test. ([#576](https://github.com/sourcenetwork/defradb/issues/576)) +* Resolve failing HTTP API tests via cleanup ([#557](https://github.com/sourcenetwork/defradb/issues/557)) +* Ensure Makefile compatibility with macOS ([#527](https://github.com/sourcenetwork/defradb/issues/527)) +* Separate out iotas in their own blocks. ([#464](https://github.com/sourcenetwork/defradb/issues/464)) +* Use x/cases for titling instead of strings to handle deprecation ([#457](https://github.com/sourcenetwork/defradb/issues/457)) +* Handle limit and offset in sub groups ([#440](https://github.com/sourcenetwork/defradb/issues/440)) +* Issue preventing DB from restarting with no records ([#437](https://github.com/sourcenetwork/defradb/issues/437)) +* log serving HTTP API before goroutine blocks ([#358](https://github.com/sourcenetwork/defradb/issues/358)) + +### Testing + +* Add integration testing for P2P. ([#655](https://github.com/sourcenetwork/defradb/issues/655)) +* Fix formatting of tests with no extra brackets ([#643](https://github.com/sourcenetwork/defradb/issues/643)) +* Add tests for `averageNode` explain. 
([#639](https://github.com/sourcenetwork/defradb/issues/639)) +* Add schema integration tests ([#628](https://github.com/sourcenetwork/defradb/issues/628)) +* Add tests for default properties ([#611](https://github.com/sourcenetwork/defradb/issues/611)) +* Specify which collection to update in test framework ([#601](https://github.com/sourcenetwork/defradb/issues/601)) +* Add tests for grouping by undefined value ([#543](https://github.com/sourcenetwork/defradb/issues/543)) +* Add test for querying undefined field ([#544](https://github.com/sourcenetwork/defradb/issues/544)) +* Expand commit query tests ([#541](https://github.com/sourcenetwork/defradb/issues/541)) +* Add cid (time-travel) query tests ([#539](https://github.com/sourcenetwork/defradb/issues/539)) +* Restructure and expand filter tests ([#512](https://github.com/sourcenetwork/defradb/issues/512)) +* Basic unit testing of `node` package ([#503](https://github.com/sourcenetwork/defradb/issues/503)) +* Test filter in filter tests ([#473](https://github.com/sourcenetwork/defradb/issues/473)) +* Add test for deletion of records in a relationship ([#329](https://github.com/sourcenetwork/defradb/issues/329)) +* Benchmark transaction iteration ([#289](https://github.com/sourcenetwork/defradb/issues/289)) + +### Refactoring + +* Improve CLI error handling and fix small issues ([#649](https://github.com/sourcenetwork/defradb/issues/649)) +* Add top-level `version` package ([#583](https://github.com/sourcenetwork/defradb/issues/583)) +* Remove extra log levels ([#634](https://github.com/sourcenetwork/defradb/issues/634)) +* Change `sortNode` to `orderNode`. 
([#591](https://github.com/sourcenetwork/defradb/issues/591)) +* Rework update and delete node to remove secondary planner ([#571](https://github.com/sourcenetwork/defradb/issues/571)) +* Trim imported connor package ([#530](https://github.com/sourcenetwork/defradb/issues/530)) +* Internal doc restructure ([#471](https://github.com/sourcenetwork/defradb/issues/471)) +* Copy-paste connor fork into repo ([#567](https://github.com/sourcenetwork/defradb/issues/567)) +* Add safety to the tests, add ability to catch stderr logs and add output path validation ([#552](https://github.com/sourcenetwork/defradb/issues/552)) +* Change handler functions implementation and response formatting ([#498](https://github.com/sourcenetwork/defradb/issues/498)) +* Improve the HTTP API implementation ([#382](https://github.com/sourcenetwork/defradb/issues/382)) +* Use new logger in net/api ([#420](https://github.com/sourcenetwork/defradb/issues/420)) +* Rename NewCidV1_SHA2_256 to mixedCaps ([#415](https://github.com/sourcenetwork/defradb/issues/415)) +* Remove utils package ([#397](https://github.com/sourcenetwork/defradb/issues/397)) +* Rework planNode Next and Value(s) function ([#374](https://github.com/sourcenetwork/defradb/issues/374)) +* Restructure aggregate query syntax ([#373](https://github.com/sourcenetwork/defradb/issues/373)) +* Remove dead code from client package and document remaining ([#356](https://github.com/sourcenetwork/defradb/issues/356)) +* Restructure datastore keys ([#316](https://github.com/sourcenetwork/defradb/issues/316)) +* Add commits lost during github outage ([#303](https://github.com/sourcenetwork/defradb/issues/303)) +* Move public members out of core and base packages ([#295](https://github.com/sourcenetwork/defradb/issues/295)) +* Make db stuff internal/private ([#291](https://github.com/sourcenetwork/defradb/issues/291)) +* Rework client.DB to ensure interface contains only public types ([#277](https://github.com/sourcenetwork/defradb/issues/277)) 
+* Remove GetPrimaryIndexDocKey from collection interface ([#279](https://github.com/sourcenetwork/defradb/issues/279)) +* Remove DataStoreKey from (public) dockey struct ([#278](https://github.com/sourcenetwork/defradb/issues/278)) +* Renormalize to ensure consistent file line termination. ([#226](https://github.com/sourcenetwork/defradb/issues/226)) +* Strongly typed key refactor ([#17](https://github.com/sourcenetwork/defradb/issues/17)) + +### Documentation + +* Use permanent link to BSL license document ([#692](https://github.com/sourcenetwork/defradb/issues/692)) +* README update v0.3.0 ([#646](https://github.com/sourcenetwork/defradb/issues/646)) +* Improve code documentation ([#533](https://github.com/sourcenetwork/defradb/issues/533)) +* Add CONTRIBUTING.md ([#531](https://github.com/sourcenetwork/defradb/issues/531)) +* Add package level docs for logging lib ([#338](https://github.com/sourcenetwork/defradb/issues/338)) + +### Tooling + +* Include all touched packages in code coverage ([#673](https://github.com/sourcenetwork/defradb/issues/673)) +* Use `gotestsum` over `go test` ([#619](https://github.com/sourcenetwork/defradb/issues/619)) +* Update Github pull request template ([#524](https://github.com/sourcenetwork/defradb/issues/524)) +* Fix the cross-build script ([#460](https://github.com/sourcenetwork/defradb/issues/460)) +* Add test coverage html output ([#466](https://github.com/sourcenetwork/defradb/issues/466)) +* Add linter rule for `goconst`. ([#398](https://github.com/sourcenetwork/defradb/issues/398)) +* Add github PR template. ([#394](https://github.com/sourcenetwork/defradb/issues/394)) +* Disable auto-fixing linter issues by default ([#429](https://github.com/sourcenetwork/defradb/issues/429)) +* Fix linting of empty `else` code blocks ([#402](https://github.com/sourcenetwork/defradb/issues/402)) +* Add the `gofmt` linter rule. 
([#405](https://github.com/sourcenetwork/defradb/issues/405)) +* Cleanup linter config file ([#400](https://github.com/sourcenetwork/defradb/issues/400)) +* Add linter rule for copyright headers ([#360](https://github.com/sourcenetwork/defradb/issues/360)) +* Organize our config files and tooling. ([#336](https://github.com/sourcenetwork/defradb/issues/336)) +* Limit line length to 100 characters (linter check) ([#224](https://github.com/sourcenetwork/defradb/issues/224)) +* Ignore db/tests folder and the benchmarks. ([#280](https://github.com/sourcenetwork/defradb/issues/280)) + +### Continuous Integration + +* Fix circleci cache permission errors. ([#371](https://github.com/sourcenetwork/defradb/issues/371)) +* Ban extra elses ([#366](https://github.com/sourcenetwork/defradb/issues/366)) +* Fix change-detection to not fail when new tests are added. ([#333](https://github.com/sourcenetwork/defradb/issues/333)) +* Update golang-ci linter and explicit go-setup to use v1.17 ([#331](https://github.com/sourcenetwork/defradb/issues/331)) +* Comment the benchmarking result comparison to the PR ([#305](https://github.com/sourcenetwork/defradb/issues/305)) +* Add benchmark performance comparisons ([#232](https://github.com/sourcenetwork/defradb/issues/232)) +* Add caching / storing of bench report on default branch ([#290](https://github.com/sourcenetwork/defradb/issues/290)) +* Ensure full-benchmarks are run on a PR-merge. ([#282](https://github.com/sourcenetwork/defradb/issues/282)) +* Add ability to control benchmarks by PR labels. 
([#267](https://github.com/sourcenetwork/defradb/issues/267)) + +### Chore + +* Update APL to refer to D2 Foundation ([#711](https://github.com/sourcenetwork/defradb/issues/711)) +* Update gitignore to include `cmd` folders ([#617](https://github.com/sourcenetwork/defradb/issues/617)) +* Enable random execution order of tests ([#554](https://github.com/sourcenetwork/defradb/issues/554)) +* Enable linters exportloopref, nolintlint, whitespace ([#535](https://github.com/sourcenetwork/defradb/issues/535)) +* Add utility for generation of man pages ([#493](https://github.com/sourcenetwork/defradb/issues/493)) +* Add Dockerfile ([#517](https://github.com/sourcenetwork/defradb/issues/517)) +* Enable errorlint linter ([#520](https://github.com/sourcenetwork/defradb/issues/520)) +* Binaries in `cmd` folder, examples in `examples` folder ([#501](https://github.com/sourcenetwork/defradb/issues/501)) +* Improve log outputs ([#506](https://github.com/sourcenetwork/defradb/issues/506)) +* Move testing to top-level `tests` folder ([#446](https://github.com/sourcenetwork/defradb/issues/446)) +* Update dependencies ([#450](https://github.com/sourcenetwork/defradb/issues/450)) +* Update go-ipfs-blockstore and ipfs-lite ([#436](https://github.com/sourcenetwork/defradb/issues/436)) +* Update libp2p dependency to v0.19 ([#424](https://github.com/sourcenetwork/defradb/issues/424)) +* Update ioutil package to io / os packages. 
([#376](https://github.com/sourcenetwork/defradb/issues/376)) +* git ignore vscode ([#343](https://github.com/sourcenetwork/defradb/issues/343)) +* Updated README.md contributors section ([#292](https://github.com/sourcenetwork/defradb/issues/292)) +* Update changelog v0.2.1 ([#252](https://github.com/sourcenetwork/defradb/issues/252)) \ No newline at end of file diff --git a/docs/website/release notes/v0.3.1.md b/docs/website/release notes/v0.3.1.md new file mode 100644 index 0000000000..1d54adaa74 --- /dev/null +++ b/docs/website/release notes/v0.3.1.md @@ -0,0 +1,94 @@ +--- +sidebar_position: 31 +--- +# v0.3.1 + +> 2022-09-23 + +DefraDB v0.3.1 is a minor release, primarily focusing on additional/extended features and fixes of items added in the `v0.3.0` release. + +### Features + +* Add cid support for allCommits ([#857](https://github.com/sourcenetwork/defradb/issues/857)) +* Add offset support to allCommits ([#859](https://github.com/sourcenetwork/defradb/issues/859)) +* Add limit support to allCommits query ([#856](https://github.com/sourcenetwork/defradb/issues/856)) +* Add order support to allCommits ([#845](https://github.com/sourcenetwork/defradb/issues/845)) +* Display CLI usage on user error ([#819](https://github.com/sourcenetwork/defradb/issues/819)) +* Add support for dockey filters in child joins ([#806](https://github.com/sourcenetwork/defradb/issues/806)) +* Add sort support for numeric aggregates ([#786](https://github.com/sourcenetwork/defradb/issues/786)) +* Allow filtering by nil ([#789](https://github.com/sourcenetwork/defradb/issues/789)) +* Add aggregate offset support ([#778](https://github.com/sourcenetwork/defradb/issues/778)) +* Remove filter depth limit ([#777](https://github.com/sourcenetwork/defradb/issues/777)) +* Add support for and-or inline array aggregate filters ([#779](https://github.com/sourcenetwork/defradb/issues/779)) +* Add limit support for aggregates ([#771](https://github.com/sourcenetwork/defradb/issues/771)) +* Add 
support for inline arrays of nillable types ([#759](https://github.com/sourcenetwork/defradb/issues/759)) +* Create errors package ([#548](https://github.com/sourcenetwork/defradb/issues/548)) +* Add ability to display peer id ([#719](https://github.com/sourcenetwork/defradb/issues/719)) +* Add a config option to set the vlog max file size ([#743](https://github.com/sourcenetwork/defradb/issues/743)) +* Explain `topLevelNode` like a `MultiNode` plan ([#749](https://github.com/sourcenetwork/defradb/issues/749)) +* Make `topLevelNode` explainable ([#737](https://github.com/sourcenetwork/defradb/issues/737)) + +### Fixes + +* Order subtype without selecting the join child ([#810](https://github.com/sourcenetwork/defradb/issues/810)) +* Correctly handles nil one-one joins ([#837](https://github.com/sourcenetwork/defradb/issues/837)) +* Reset scan node for each join ([#828](https://github.com/sourcenetwork/defradb/issues/828)) +* Handle filter input field argument being nil ([#787](https://github.com/sourcenetwork/defradb/issues/787)) +* Ensure CLI outputs JSON to stdout when directed to pipe ([#804](https://github.com/sourcenetwork/defradb/issues/804)) +* Error if given the wrong side of a one-one relationship ([#795](https://github.com/sourcenetwork/defradb/issues/795)) +* Add object marker to enable return of empty docs ([#800](https://github.com/sourcenetwork/defradb/issues/800)) +* Resolve the extra `typeIndexJoin`s for `_avg` aggregate ([#774](https://github.com/sourcenetwork/defradb/issues/774)) +* Remove _like filter operator ([#797](https://github.com/sourcenetwork/defradb/issues/797)) +* Remove having gql types ([#785](https://github.com/sourcenetwork/defradb/issues/785)) +* Error if child _group selected without parent groupBy ([#781](https://github.com/sourcenetwork/defradb/issues/781)) +* Error nicely on missing field specifier ([#782](https://github.com/sourcenetwork/defradb/issues/782)) +* Handle order input field argument being nil 
([#701](https://github.com/sourcenetwork/defradb/issues/701)) +* Change output to outputpath in config file template for logger ([#716](https://github.com/sourcenetwork/defradb/issues/716)) +* Delete mutations not correctly persisting all keys ([#731](https://github.com/sourcenetwork/defradb/issues/731)) + +### Tooling + +* Ban the usage of `ioutil` package ([#747](https://github.com/sourcenetwork/defradb/issues/747)) +* Migrate from CircleCi to GitHub Actions ([#679](https://github.com/sourcenetwork/defradb/issues/679)) + +### Documentation + +* Clarify meaning of url param, update in-repo CLI docs ([#814](https://github.com/sourcenetwork/defradb/issues/814)) +* Disclaimer of exposed to network and not encrypted ([#793](https://github.com/sourcenetwork/defradb/issues/793)) +* Update logo to respect theme ([#728](https://github.com/sourcenetwork/defradb/issues/728)) + +### Refactoring + +* Replace all `interface{}` with `any` alias ([#805](https://github.com/sourcenetwork/defradb/issues/805)) +* Use fastjson to parse mutation data string ([#772](https://github.com/sourcenetwork/defradb/issues/772)) +* Rework limit node flow ([#767](https://github.com/sourcenetwork/defradb/issues/767)) +* Make Option immutable ([#769](https://github.com/sourcenetwork/defradb/issues/769)) +* Rework sum and count nodes to make use of generics ([#757](https://github.com/sourcenetwork/defradb/issues/757)) +* Remove some possible panics from codebase ([#732](https://github.com/sourcenetwork/defradb/issues/732)) +* Change logging calls to use feedback in CLI package ([#714](https://github.com/sourcenetwork/defradb/issues/714)) + +### Testing + +* Add tests for aggs with nil filters ([#813](https://github.com/sourcenetwork/defradb/issues/813)) +* Add not equals filter tests ([#798](https://github.com/sourcenetwork/defradb/issues/798)) +* Fix `cli/peerid_test` to not clash addresses ([#766](https://github.com/sourcenetwork/defradb/issues/766)) +* Add change detector summary to test readme 
([#754](https://github.com/sourcenetwork/defradb/issues/754)) +* Add tests for inline array grouping ([#752](https://github.com/sourcenetwork/defradb/issues/752)) + +### Continuous integration + +* Reduce test resource usage and test with file db ([#791](https://github.com/sourcenetwork/defradb/issues/791)) +* Add makefile target to verify the local module cache ([#775](https://github.com/sourcenetwork/defradb/issues/775)) +* Allow PR titles to end with a number ([#745](https://github.com/sourcenetwork/defradb/issues/745)) +* Add a workflow to validate pull request titles ([#734](https://github.com/sourcenetwork/defradb/issues/734)) +* Fix the linter version to `v1.47` ([#726](https://github.com/sourcenetwork/defradb/issues/726)) + +### Chore + +* Remove file system paths from resulting executable ([#831](https://github.com/sourcenetwork/defradb/issues/831)) +* Add goimports linter for consistent imports ordering ([#816](https://github.com/sourcenetwork/defradb/issues/816)) +* Improve UX by providing more information ([#802](https://github.com/sourcenetwork/defradb/issues/802)) +* Change to defra errors and handle errors stacktrace ([#794](https://github.com/sourcenetwork/defradb/issues/794)) +* Clean up `go.mod` with pruned module graphs ([#756](https://github.com/sourcenetwork/defradb/issues/756)) +* Update to v0.20.3 of libp2p ([#740](https://github.com/sourcenetwork/defradb/issues/740)) +* Bump to GoLang `v1.18` ([#721](https://github.com/sourcenetwork/defradb/issues/721)) \ No newline at end of file diff --git a/docs/website/release notes/v0.4.0.md b/docs/website/release notes/v0.4.0.md new file mode 100644 index 0000000000..24ec5329d8 --- /dev/null +++ b/docs/website/release notes/v0.4.0.md @@ -0,0 +1,80 @@ +--- +sidebar_position: 40 +--- + +# v0.4.0 + +> 2022-12-23 + +DefraDB v0.4 is a major pre-production release. 
Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are various new features in this release - some of which are breaking - and we invite you to review the official changelog below. Some highlights are persistence of replicators, DateTime scalars, TLS support, and GQL subscriptions. + +This release does include a Breaking Change to existing v0.3.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. + +### Features + +* Add basic metric functionality ([#971](https://github.com/sourcenetwork/defradb/issues/971)) +* Add thread safe transactional in-memory datastore ([#947](https://github.com/sourcenetwork/defradb/issues/947)) +* Persist p2p replicators ([#960](https://github.com/sourcenetwork/defradb/issues/960)) +* Add DateTime custom scalars ([#931](https://github.com/sourcenetwork/defradb/issues/931)) +* Add GraphQL subscriptions ([#934](https://github.com/sourcenetwork/defradb/issues/934)) +* Add support for tls ([#885](https://github.com/sourcenetwork/defradb/issues/885)) +* Add group by support for commits ([#887](https://github.com/sourcenetwork/defradb/issues/887)) +* Add depth support for commits ([#889](https://github.com/sourcenetwork/defradb/issues/889)) +* Make dockey optional for allCommits queries ([#847](https://github.com/sourcenetwork/defradb/issues/847)) +* Add WithStack to the errors package ([#870](https://github.com/sourcenetwork/defradb/issues/870)) +* Add event system ([#834](https://github.com/sourcenetwork/defradb/issues/834)) + +### Fixes + +* Correct errors.WithStack behaviour ([#984](https://github.com/sourcenetwork/defradb/issues/984)) +* Correctly handle nested one to one joins ([#964](https://github.com/sourcenetwork/defradb/issues/964)) +* Do not assume parent 
record exists when joining ([#963](https://github.com/sourcenetwork/defradb/issues/963)) +* Change time format for HTTP API log ([#910](https://github.com/sourcenetwork/defradb/issues/910)) +* Error if group select contains non-group-by fields ([#898](https://github.com/sourcenetwork/defradb/issues/898)) +* Add inspection of values for ENV flags ([#900](https://github.com/sourcenetwork/defradb/issues/900)) +* Remove panics from document ([#881](https://github.com/sourcenetwork/defradb/issues/881)) +* Add __typename support ([#871](https://github.com/sourcenetwork/defradb/issues/871)) +* Handle subscriber close ([#877](https://github.com/sourcenetwork/defradb/issues/877)) +* Publish update events post commit ([#866](https://github.com/sourcenetwork/defradb/issues/866)) + +### Refactoring + +* Make rootstore require Batching and TxnDatastore ([#940](https://github.com/sourcenetwork/defradb/issues/940)) +* Conceptually clarify schema vs query-language ([#924](https://github.com/sourcenetwork/defradb/issues/924)) +* Decouple db.db from gql ([#912](https://github.com/sourcenetwork/defradb/issues/912)) +* Merkle clock heads cleanup ([#918](https://github.com/sourcenetwork/defradb/issues/918)) +* Simplify dag fetcher ([#913](https://github.com/sourcenetwork/defradb/issues/913)) +* Cleanup parsing logic ([#909](https://github.com/sourcenetwork/defradb/issues/909)) +* Move planner outside the gql directory ([#907](https://github.com/sourcenetwork/defradb/issues/907)) +* Refactor commit nodes ([#892](https://github.com/sourcenetwork/defradb/issues/892)) +* Make latest commits syntax sugar ([#890](https://github.com/sourcenetwork/defradb/issues/890)) +* Remove commit query ([#841](https://github.com/sourcenetwork/defradb/issues/841)) + +### Testing + +* Add event tests ([#965](https://github.com/sourcenetwork/defradb/issues/965)) +* Add new setup for testing explain functionality ([#949](https://github.com/sourcenetwork/defradb/issues/949)) +* Add txn relation-type delete and 
create tests ([#875](https://github.com/sourcenetwork/defradb/issues/875)) +* Skip change detection for tests that assert panic ([#883](https://github.com/sourcenetwork/defradb/issues/883)) + +### Continuous integration + +* Bump all gh-action versions to support node16 ([#990](https://github.com/sourcenetwork/defradb/issues/990)) +* Bump ssh-agent action to v0.7.0 ([#978](https://github.com/sourcenetwork/defradb/issues/978)) +* Add error message format check ([#901](https://github.com/sourcenetwork/defradb/issues/901)) + +### Chore + +* Extract (events, merkle) errors to errors.go ([#973](https://github.com/sourcenetwork/defradb/issues/973)) +* Extract (datastore, db) errors to errors.go ([#969](https://github.com/sourcenetwork/defradb/issues/969)) +* Extract (connor, crdt, core) errors to errors.go ([#968](https://github.com/sourcenetwork/defradb/issues/968)) +* Extract inline (http and client) errors to errors.go ([#967](https://github.com/sourcenetwork/defradb/issues/967)) +* Update badger version ([#966](https://github.com/sourcenetwork/defradb/issues/966)) +* Move Option and Enumerable to immutables ([#939](https://github.com/sourcenetwork/defradb/issues/939)) +* Add configuration of external loggers ([#942](https://github.com/sourcenetwork/defradb/issues/942)) +* Strip DSKey prefixes and simplify NewDataStoreKey ([#944](https://github.com/sourcenetwork/defradb/issues/944)) +* Include version metadata in cross-building ([#930](https://github.com/sourcenetwork/defradb/issues/930)) +* Update to v0.23.2 the libP2P package ([#908](https://github.com/sourcenetwork/defradb/issues/908)) +* Remove `ipfslite` dependency ([#739](https://github.com/sourcenetwork/defradb/issues/739)) \ No newline at end of file diff --git a/docs/website/release notes/v0.5.0.md b/docs/website/release notes/v0.5.0.md new file mode 100644 index 0000000000..96e8499506 --- /dev/null +++ b/docs/website/release notes/v0.5.0.md @@ -0,0 +1,144 @@ +--- +sidebar_position: 50 +--- + +# v0.5.0 + +> 
2023-04-12 + +DefraDB v0.5 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are many new features in this release, but most importantly, this is the first open source release for DefraDB. As such, this release focused on various quality of life changes and refactors, bug fixes, and overall cleanliness of the repo so it can effectively be used and tested in the public domain. + +To get a full outline of the changes, we invite you to review the official changelog below. Some highlights are the first iteration of our schema update system, allowing developers to add new fields to schemas using our JSON Patch based DDL, a new DAG based delete system which will persist "soft-delete" ops into the CRDT Merkle DAG, and an early prototype for our collection level peer-to-peer synchronization. + +This release does include a Breaking Change to existing v0.4.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. + +### Features + +* Add document delete mechanics ([#1263](https://github.com/sourcenetwork/defradb/issues/1263)) +* Ability to explain an executed request ([#1188](https://github.com/sourcenetwork/defradb/issues/1188)) +* Add SchemaPatch CLI command ([#1250](https://github.com/sourcenetwork/defradb/issues/1250)) +* Add support for one-one mutation from sec. 
side ([#1247](https://github.com/sourcenetwork/defradb/issues/1247)) +* Store only key in DAG instead of dockey path ([#1245](https://github.com/sourcenetwork/defradb/issues/1245)) +* Add collectionId field to commit field ([#1235](https://github.com/sourcenetwork/defradb/issues/1235)) +* Add field kind substitution for PatchSchema ([#1223](https://github.com/sourcenetwork/defradb/issues/1223)) +* Add dockey field for commit field ([#1216](https://github.com/sourcenetwork/defradb/issues/1216)) +* Allow new fields to be added locally to schema ([#1139](https://github.com/sourcenetwork/defradb/issues/1139)) +* Add `like` sub-string filter ([#1091](https://github.com/sourcenetwork/defradb/issues/1091)) +* Add ability for P2P to wait for pushlog by peer ([#1098](https://github.com/sourcenetwork/defradb/issues/1098)) +* Add P2P collection topic subscription ([#1086](https://github.com/sourcenetwork/defradb/issues/1086)) +* Add support for schema version id in queries ([#1067](https://github.com/sourcenetwork/defradb/issues/1067)) +* Add schema version id to commit queries ([#1061](https://github.com/sourcenetwork/defradb/issues/1061)) +* Persist schema version at time of commit ([#1055](https://github.com/sourcenetwork/defradb/issues/1055)) +* Add ability to input simple explain type arg ([#1039](https://github.com/sourcenetwork/defradb/issues/1039)) + +### Fixes + +* API address parameter validation ([#1311](https://github.com/sourcenetwork/defradb/issues/1311)) +* Improve error message for NonNull GQL types ([#1333](https://github.com/sourcenetwork/defradb/issues/1333)) +* Handle panics in the rpc server ([#1330](https://github.com/sourcenetwork/defradb/issues/1330)) +* Handle returned error in select.go ([#1329](https://github.com/sourcenetwork/defradb/issues/1329)) +* Resolve handful of CLI issues ([#1318](https://github.com/sourcenetwork/defradb/issues/1318)) +* Only check for events queue on subscription request 
([#1326](https://github.com/sourcenetwork/defradb/issues/1326)) +* Remove client Create/UpdateCollection ([#1309](https://github.com/sourcenetwork/defradb/issues/1309)) +* CLI to display specific command usage help ([#1314](https://github.com/sourcenetwork/defradb/issues/1314)) +* Fix P2P collection CLI commands ([#1295](https://github.com/sourcenetwork/defradb/issues/1295)) +* Dont double up badger file path ([#1299](https://github.com/sourcenetwork/defradb/issues/1299)) +* Update immutable package ([#1290](https://github.com/sourcenetwork/defradb/issues/1290)) +* Fix panic on success of Add/RemoveP2PCollections ([#1297](https://github.com/sourcenetwork/defradb/issues/1297)) +* Fix deadlock on memory-datastore Close ([#1273](https://github.com/sourcenetwork/defradb/issues/1273)) +* Determine if query is introspection query ([#1255](https://github.com/sourcenetwork/defradb/issues/1255)) +* Allow newly added fields to sync via p2p ([#1226](https://github.com/sourcenetwork/defradb/issues/1226)) +* Expose `ExplainEnum` in the GQL schema ([#1204](https://github.com/sourcenetwork/defradb/issues/1204)) +* Resolve aggregates' mapping with deep nested subtypes ([#1175](https://github.com/sourcenetwork/defradb/issues/1175)) +* Make sort stable and handle nil comparison ([#1094](https://github.com/sourcenetwork/defradb/issues/1094)) +* Change successful schema add status to 200 ([#1106](https://github.com/sourcenetwork/defradb/issues/1106)) +* Add delay in P2P test util execution ([#1093](https://github.com/sourcenetwork/defradb/issues/1093)) +* Ensure errors test don't hard expect folder name ([#1072](https://github.com/sourcenetwork/defradb/issues/1072)) +* Remove potential P2P deadlock ([#1056](https://github.com/sourcenetwork/defradb/issues/1056)) +* Rework the P2P integration tests ([#989](https://github.com/sourcenetwork/defradb/issues/989)) +* Improve DAG sync with highly concurrent updates ([#1031](https://github.com/sourcenetwork/defradb/issues/1031)) + +### 
Documentation + +* Update docs for the v0.5 release ([#1320](https://github.com/sourcenetwork/defradb/issues/1320)) +* Document client interfaces in client/db.go ([#1305](https://github.com/sourcenetwork/defradb/issues/1305)) +* Document client Description types ([#1307](https://github.com/sourcenetwork/defradb/issues/1307)) +* Improve security policy ([#1240](https://github.com/sourcenetwork/defradb/issues/1240)) +* Add security disclosure policy ([#1194](https://github.com/sourcenetwork/defradb/issues/1194)) +* Correct commits query example in readme ([#1172](https://github.com/sourcenetwork/defradb/issues/1172)) + +### Refactoring + +* Improve p2p collection operations on peer ([#1286](https://github.com/sourcenetwork/defradb/issues/1286)) +* Migrate gql introspection tests to new framework ([#1211](https://github.com/sourcenetwork/defradb/issues/1211)) +* Reorganise client transaction related interfaces ([#1180](https://github.com/sourcenetwork/defradb/issues/1180)) +* Config-local viper, rootdir, and logger parsing ([#1132](https://github.com/sourcenetwork/defradb/issues/1132)) +* Migrate mutation-relation tests to new framework ([#1109](https://github.com/sourcenetwork/defradb/issues/1109)) +* Rework integration test framework ([#1089](https://github.com/sourcenetwork/defradb/issues/1089)) +* Generate gql types using col. 
desc ([#1080](https://github.com/sourcenetwork/defradb/issues/1080)) +* Extract config errors to dedicated file ([#1107](https://github.com/sourcenetwork/defradb/issues/1107)) +* Change terminology from query to request ([#1054](https://github.com/sourcenetwork/defradb/issues/1054)) +* Allow db keys to handle multiple schema versions ([#1026](https://github.com/sourcenetwork/defradb/issues/1026)) +* Extract query schema errors to dedicated file ([#1037](https://github.com/sourcenetwork/defradb/issues/1037)) +* Extract planner errors to dedicated file ([#1034](https://github.com/sourcenetwork/defradb/issues/1034)) +* Extract query parser errors to dedicated file ([#1035](https://github.com/sourcenetwork/defradb/issues/1035)) + +### Testing + +* Remove test reference to DEFRA_ROOTDIR env var ([#1328](https://github.com/sourcenetwork/defradb/issues/1328)) +* Expand tests for Peer subscribe actions ([#1287](https://github.com/sourcenetwork/defradb/issues/1287)) +* Fix flaky TestCloseThroughContext test ([#1265](https://github.com/sourcenetwork/defradb/issues/1265)) +* Add gql introspection tests for patch schema ([#1219](https://github.com/sourcenetwork/defradb/issues/1219)) +* Explicitly state change detector split for test ([#1228](https://github.com/sourcenetwork/defradb/issues/1228)) +* Add test for successful one-one create mutation ([#1215](https://github.com/sourcenetwork/defradb/issues/1215)) +* Ensure that all databases are always closed on exit ([#1187](https://github.com/sourcenetwork/defradb/issues/1187)) +* Add P2P tests for Schema Update adding field ([#1182](https://github.com/sourcenetwork/defradb/issues/1182)) +* Migrate P2P/state tests to new framework ([#1160](https://github.com/sourcenetwork/defradb/issues/1160)) +* Remove sleep from subscription tests ([#1156](https://github.com/sourcenetwork/defradb/issues/1156)) +* Fetch documents on test execution start ([#1163](https://github.com/sourcenetwork/defradb/issues/1163)) +* Introduce basic testing 
for the `version` module ([#1111](https://github.com/sourcenetwork/defradb/issues/1111)) +* Boost test coverage for collection_update ([#1050](https://github.com/sourcenetwork/defradb/issues/1050)) +* Wait between P2P update retry attempts ([#1052](https://github.com/sourcenetwork/defradb/issues/1052)) +* Exclude auto-generated protobuf files from codecov ([#1048](https://github.com/sourcenetwork/defradb/issues/1048)) +* Add P2P tests for relational docs ([#1042](https://github.com/sourcenetwork/defradb/issues/1042)) + +### Continuous integration + +* Add workflow that builds DefraDB AMI upon tag push ([#1304](https://github.com/sourcenetwork/defradb/issues/1304)) +* Allow PR title to end with a capital letter ([#1291](https://github.com/sourcenetwork/defradb/issues/1291)) +* Changes for `dependabot` to be well-behaved ([#1165](https://github.com/sourcenetwork/defradb/issues/1165)) +* Skip benchmarks for dependabot ([#1144](https://github.com/sourcenetwork/defradb/issues/1144)) +* Add workflow to ensure deps build properly ([#1078](https://github.com/sourcenetwork/defradb/issues/1078)) +* Runner and Builder Containerfiles ([#951](https://github.com/sourcenetwork/defradb/issues/951)) +* Fix go-header linter rule to be any year ([#1021](https://github.com/sourcenetwork/defradb/issues/1021)) + +### Chore + +* Add Islam as contributor ([#1302](https://github.com/sourcenetwork/defradb/issues/1302)) +* Update go-libp2p to 0.26.4 ([#1257](https://github.com/sourcenetwork/defradb/issues/1257)) +* Improve the test coverage of datastore ([#1203](https://github.com/sourcenetwork/defradb/issues/1203)) +* Add issue and discussion templates ([#1193](https://github.com/sourcenetwork/defradb/issues/1193)) +* Bump libp2p/go-libp2p-kad-dht from 0.21.0 to 0.21.1 ([#1146](https://github.com/sourcenetwork/defradb/issues/1146)) +* Enable dependabot ([#1120](https://github.com/sourcenetwork/defradb/issues/1120)) +* Update `opentelemetry` dependencies 
([#1114](https://github.com/sourcenetwork/defradb/issues/1114)) +* Update dependencies including go-ipfs ([#1112](https://github.com/sourcenetwork/defradb/issues/1112)) +* Bump to GoLang v1.19 ([#818](https://github.com/sourcenetwork/defradb/issues/818)) +* Remove versionedScan node ([#1049](https://github.com/sourcenetwork/defradb/issues/1049)) + +### Bot + +* Bump github.com/multiformats/go-multiaddr from 0.8.0 to 0.9.0 ([#1277](https://github.com/sourcenetwork/defradb/issues/1277)) +* Bump google.golang.org/grpc from 1.53.0 to 1.54.0 ([#1233](https://github.com/sourcenetwork/defradb/issues/1233)) +* Bump github.com/multiformats/go-multibase from 0.1.1 to 0.2.0 ([#1230](https://github.com/sourcenetwork/defradb/issues/1230)) +* Bump github.com/ipfs/go-libipfs from 0.6.2 to 0.7.0 ([#1231](https://github.com/sourcenetwork/defradb/issues/1231)) +* Bump github.com/ipfs/go-cid from 0.3.2 to 0.4.0 ([#1200](https://github.com/sourcenetwork/defradb/issues/1200)) +* Bump github.com/ipfs/go-ipfs-blockstore from 1.2.0 to 1.3.0 ([#1199](https://github.com/sourcenetwork/defradb/issues/1199)) +* Bump github.com/stretchr/testify from 1.8.1 to 1.8.2 ([#1198](https://github.com/sourcenetwork/defradb/issues/1198)) +* Bump github.com/ipfs/go-libipfs from 0.6.1 to 0.6.2 ([#1201](https://github.com/sourcenetwork/defradb/issues/1201)) +* Bump golang.org/x/crypto from 0.6.0 to 0.7.0 ([#1197](https://github.com/sourcenetwork/defradb/issues/1197)) +* Bump libp2p/go-libp2p-gostream from 0.5.0 to 0.6.0 ([#1152](https://github.com/sourcenetwork/defradb/issues/1152)) +* Bump github.com/ipfs/go-libipfs from 0.5.0 to 0.6.1 ([#1166](https://github.com/sourcenetwork/defradb/issues/1166)) +* Bump github.com/ugorji/go/codec from 1.2.9 to 1.2.11 ([#1173](https://github.com/sourcenetwork/defradb/issues/1173)) +* Bump github.com/libp2p/go-libp2p-pubsub from 0.9.0 to 0.9.3 ([#1183](https://github.com/sourcenetwork/defradb/issues/1183)) \ No newline at end of file diff --git a/docs/website/release 
notes/v0.5.1.md b/docs/website/release notes/v0.5.1.md new file mode 100644 index 0000000000..f204c56106 --- /dev/null +++ b/docs/website/release notes/v0.5.1.md @@ -0,0 +1,91 @@ +--- +sidebar_position: 51 +--- + +# v0.5.1 + +> 2023-05-16 + +### Features + +* Add collection response information on creation ([#1499](https://github.com/sourcenetwork/defradb/issues/1499)) +* CLI client request from file ([#1503](https://github.com/sourcenetwork/defradb/issues/1503)) +* Add commits fieldName and fieldId fields ([#1451](https://github.com/sourcenetwork/defradb/issues/1451)) +* Add allowed origins config ([#1408](https://github.com/sourcenetwork/defradb/issues/1408)) +* Add descriptions to all system defined GQL stuff ([#1387](https://github.com/sourcenetwork/defradb/issues/1387)) +* Strongly type Request.Errors ([#1364](https://github.com/sourcenetwork/defradb/issues/1364)) + +### Fixes + +* Skip new test packages in change detector ([#1495](https://github.com/sourcenetwork/defradb/issues/1495)) +* Make nested joins work correctly from primary direction ([#1491](https://github.com/sourcenetwork/defradb/issues/1491)) +* Add reconnection to known peers ([#1482](https://github.com/sourcenetwork/defradb/issues/1482)) +* Rename commit field input arg to fieldId ([#1460](https://github.com/sourcenetwork/defradb/issues/1460)) +* Reference collectionID in p2p readme ([#1466](https://github.com/sourcenetwork/defradb/issues/1466)) +* Handling SIGTERM in CLI `start` command ([#1459](https://github.com/sourcenetwork/defradb/issues/1459)) +* Update QL documentation link and replicator command ([#1440](https://github.com/sourcenetwork/defradb/issues/1440)) +* Fix typo in readme ([#1419](https://github.com/sourcenetwork/defradb/issues/1419)) +* Limit the size of http request bodies that we handle ([#1405](https://github.com/sourcenetwork/defradb/issues/1405)) +* Improve P2P event handling ([#1388](https://github.com/sourcenetwork/defradb/issues/1388)) +* Serialize DB errors to json in 
http package ([#1401](https://github.com/sourcenetwork/defradb/issues/1401)) +* Do not commit if errors have been returned ([#1390](https://github.com/sourcenetwork/defradb/issues/1390)) +* Unlock replicator lock before returning error ([#1369](https://github.com/sourcenetwork/defradb/issues/1369)) +* Improve NonNull error message ([#1362](https://github.com/sourcenetwork/defradb/issues/1362)) +* Use ring-buffer for WaitForFoo chans ([#1359](https://github.com/sourcenetwork/defradb/issues/1359)) +* Guarantee event processing order ([#1352](https://github.com/sourcenetwork/defradb/issues/1352)) +* Explain of _group with dockeys filter to be []string ([#1348](https://github.com/sourcenetwork/defradb/issues/1348)) + +### Refactoring + +* Use `int32` for proper gql scalar Int parsing ([#1493](https://github.com/sourcenetwork/defradb/issues/1493)) +* Improve rollback on peer P2P collection error ([#1461](https://github.com/sourcenetwork/defradb/issues/1461)) +* Improve CLI with test suite and builder pattern ([#928](https://github.com/sourcenetwork/defradb/issues/928)) + +### Testing + +* Add DB/Node Restart tests ([#1504](https://github.com/sourcenetwork/defradb/issues/1504)) +* Provide tests for client introspection query ([#1492](https://github.com/sourcenetwork/defradb/issues/1492)) +* Convert explain count tests to new explain setup ([#1488](https://github.com/sourcenetwork/defradb/issues/1488)) +* Convert explain sum tests to new explain setup ([#1489](https://github.com/sourcenetwork/defradb/issues/1489)) +* Convert explain average tests to new explain setup ([#1487](https://github.com/sourcenetwork/defradb/issues/1487)) +* Convert explain top-level tests to new explain setup ([#1480](https://github.com/sourcenetwork/defradb/issues/1480)) +* Convert explain order tests to new explain setup ([#1478](https://github.com/sourcenetwork/defradb/issues/1478)) +* Convert explain join tests to new explain setup 
([#1476](https://github.com/sourcenetwork/defradb/issues/1476)) +* Convert explain dagscan tests to new explain setup ([#1474](https://github.com/sourcenetwork/defradb/issues/1474)) +* Add tests to assert schema id order independence ([#1456](https://github.com/sourcenetwork/defradb/issues/1456)) +* Capitalize all integration schema types ([#1445](https://github.com/sourcenetwork/defradb/issues/1445)) +* Convert explain limit tests to new explain setup ([#1446](https://github.com/sourcenetwork/defradb/issues/1446)) +* Improve change detector performance ([#1433](https://github.com/sourcenetwork/defradb/issues/1433)) +* Convert mutation explain tests to new explain setup ([#1416](https://github.com/sourcenetwork/defradb/issues/1416)) +* Convert filter explain tests to new explain setup ([#1380](https://github.com/sourcenetwork/defradb/issues/1380)) +* Retry test doc mutation on transaction conflict ([#1366](https://github.com/sourcenetwork/defradb/issues/1366)) + +### Continuous integration + +* Remove secret ssh key stuff from change detector wf ([#1438](https://github.com/sourcenetwork/defradb/issues/1438)) +* Fix the SSH security issue from AMI scan report ([#1426](https://github.com/sourcenetwork/defradb/issues/1426)) +* Add a separate workflow to run the linter ([#1434](https://github.com/sourcenetwork/defradb/issues/1434)) +* Allow CI to work from forked repo ([#1392](https://github.com/sourcenetwork/defradb/issues/1392)) +* Bump go version within packer for AWS AMI ([#1344](https://github.com/sourcenetwork/defradb/issues/1344)) + +### Chore + +* Enshrine defra logger names ([#1410](https://github.com/sourcenetwork/defradb/issues/1410)) +* Remove some dead code ([#1470](https://github.com/sourcenetwork/defradb/issues/1470)) +* Update graphql-go ([#1422](https://github.com/sourcenetwork/defradb/issues/1422)) +* Improve logging consistency ([#1424](https://github.com/sourcenetwork/defradb/issues/1424)) +* Makefile tests with shorter timeout and common flags 
([#1397](https://github.com/sourcenetwork/defradb/issues/1397)) +* Move to gofrs/uuid ([#1396](https://github.com/sourcenetwork/defradb/issues/1396)) +* Move to ipfs boxo ([#1393](https://github.com/sourcenetwork/defradb/issues/1393)) +* Document collection.txn ([#1363](https://github.com/sourcenetwork/defradb/issues/1363)) + +### Bot + +* Bump golang.org/x/crypto from 0.8.0 to 0.9.0 ([#1497](https://github.com/sourcenetwork/defradb/issues/1497)) +* Bump golang.org/x/net from 0.9.0 to 0.10.0 ([#1496](https://github.com/sourcenetwork/defradb/issues/1496)) +* Bump google.golang.org/grpc from 1.54.0 to 1.55.0 ([#1464](https://github.com/sourcenetwork/defradb/issues/1464)) +* Bump github.com/ipfs/boxo from 0.8.0 to 0.8.1 ([#1427](https://github.com/sourcenetwork/defradb/issues/1427)) +* Bump golang.org/x/crypto from 0.7.0 to 0.8.0 ([#1398](https://github.com/sourcenetwork/defradb/issues/1398)) +* Bump github.com/spf13/cobra from 1.6.1 to 1.7.0 ([#1399](https://github.com/sourcenetwork/defradb/issues/1399)) +* Bump github.com/ipfs/go-blockservice from 0.5.0 to 0.5.1 ([#1300](https://github.com/sourcenetwork/defradb/issues/1300)) +* Bump github.com/ipfs/go-cid from 0.4.0 to 0.4.1 ([#1301](https://github.com/sourcenetwork/defradb/issues/1301)) diff --git a/docs/website/release notes/v0.6.0.md b/docs/website/release notes/v0.6.0.md new file mode 100644 index 0000000000..8026fec0c8 --- /dev/null +++ b/docs/website/release notes/v0.6.0.md @@ -0,0 +1,85 @@ +--- +sidebar_position: 61 +--- + +# v0.6.0 + +> 2023-07-31 + +DefraDB v0.6 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are several new and powerful features, important bug fixes, and notable refactors in this release. 
Some highlight features include: The initial release of our LensVM based schema migration engine powered by WebAssembly ([#1650](https://github.com/sourcenetwork/defradb/issues/1650)), newly embedded DefraDB Playground which includes a bundled GraphQL client and schema manager, and last but not least a relation field (type_id) alias to improve the developer experience ([#1609](https://github.com/sourcenetwork/defradb/issues/1609)). + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.5.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Add `_not` operator ([#1631](https://github.com/sourcenetwork/defradb/issues/1631)) +* Schema list API ([#1625](https://github.com/sourcenetwork/defradb/issues/1625)) +* Add simple data import and export ([#1630](https://github.com/sourcenetwork/defradb/issues/1630)) +* Playground ([#1575](https://github.com/sourcenetwork/defradb/issues/1575)) +* Add schema migration get and set cmds to CLI ([#1650](https://github.com/sourcenetwork/defradb/issues/1650)) +* Allow relation alias on create and update ([#1609](https://github.com/sourcenetwork/defradb/issues/1609)) +* Make fetcher calculate docFetches and fieldFetches ([#1713](https://github.com/sourcenetwork/defradb/issues/1713)) +* Add lens migration engine to defra ([#1564](https://github.com/sourcenetwork/defradb/issues/1564)) +* Add `_keys` attribute to `selectNode` simple explain ([#1546](https://github.com/sourcenetwork/defradb/issues/1546)) +* CLI commands for secondary indexes ([#1595](https://github.com/sourcenetwork/defradb/issues/1595)) +* Add alias to `groupBy` related object ([#1579](https://github.com/sourcenetwork/defradb/issues/1579)) +* Non-unique secondary index (no querying) 
([#1450](https://github.com/sourcenetwork/defradb/issues/1450)) +* Add ability to explain-debug all nodes ([#1563](https://github.com/sourcenetwork/defradb/issues/1563)) +* Include dockey in doc exists err ([#1558](https://github.com/sourcenetwork/defradb/issues/1558)) + +### Fixes + +* Better wait in CLI integration test ([#1415](https://github.com/sourcenetwork/defradb/issues/1415)) +* Return error when relation is not defined on both types ([#1647](https://github.com/sourcenetwork/defradb/issues/1647)) +* Change `core.DocumentMapping` to pointer ([#1528](https://github.com/sourcenetwork/defradb/issues/1528)) +* Fix invalid (badger) datastore state ([#1685](https://github.com/sourcenetwork/defradb/issues/1685)) +* Discard index and subscription implicit transactions ([#1715](https://github.com/sourcenetwork/defradb/issues/1715)) +* Remove duplicated `peers` in peerstore prefix ([#1678](https://github.com/sourcenetwork/defradb/issues/1678)) +* Return errors from typeJoinOne ([#1716](https://github.com/sourcenetwork/defradb/issues/1716)) +* Document change detector breaking change ([#1531](https://github.com/sourcenetwork/defradb/issues/1531)) +* Standardise `schema migration` CLI errors ([#1682](https://github.com/sourcenetwork/defradb/issues/1682)) +* Introspection OrderArg returns null inputFields ([#1633](https://github.com/sourcenetwork/defradb/issues/1633)) +* Avoid duplicated requestable fields ([#1621](https://github.com/sourcenetwork/defradb/issues/1621)) +* Normalize int field kind ([#1619](https://github.com/sourcenetwork/defradb/issues/1619)) +* Change the WriteSyncer to use lock when piping ([#1608](https://github.com/sourcenetwork/defradb/issues/1608)) +* Filter splitting and rendering for related types ([#1541](https://github.com/sourcenetwork/defradb/issues/1541)) + +### Documentation + +* Improve CLI command documentation ([#1505](https://github.com/sourcenetwork/defradb/issues/1505)) + +### Refactoring + +* Schema list output to include 
schemaVersionID ([#1706](https://github.com/sourcenetwork/defradb/issues/1706)) +* Reuse lens wasm modules ([#1641](https://github.com/sourcenetwork/defradb/issues/1641)) +* Remove redundant txn param from fetcher start ([#1635](https://github.com/sourcenetwork/defradb/issues/1635)) +* Remove first CRDT byte from field encoded values ([#1622](https://github.com/sourcenetwork/defradb/issues/1622)) +* Merge `node` into `net` and improve coverage ([#1593](https://github.com/sourcenetwork/defradb/issues/1593)) +* Fetcher filter and field optimization ([#1500](https://github.com/sourcenetwork/defradb/issues/1500)) + +### Testing + +* Rework transaction test framework capabilities ([#1603](https://github.com/sourcenetwork/defradb/issues/1603)) +* Expand backup integration tests ([#1699](https://github.com/sourcenetwork/defradb/issues/1699)) +* Disable test ([#1675](https://github.com/sourcenetwork/defradb/issues/1675)) +* Add tests for 1-1 group by id ([#1655](https://github.com/sourcenetwork/defradb/issues/1655)) +* Remove CLI tests from make test ([#1643](https://github.com/sourcenetwork/defradb/issues/1643)) +* Bundle test state into single var ([#1645](https://github.com/sourcenetwork/defradb/issues/1645)) +* Convert explain group tests to new explain setup ([#1537](https://github.com/sourcenetwork/defradb/issues/1537)) +* Add tests for foo_id field name clashes ([#1521](https://github.com/sourcenetwork/defradb/issues/1521)) +* Resume wait correctly following test node restart ([#1515](https://github.com/sourcenetwork/defradb/issues/1515)) +* Require no errors when none expected ([#1509](https://github.com/sourcenetwork/defradb/issues/1509)) + +### Continuous integration + +* Add workflows to push, pull, and validate docker images ([#1676](https://github.com/sourcenetwork/defradb/issues/1676)) +* Build mocks using make ([#1612](https://github.com/sourcenetwork/defradb/issues/1612)) +* Fix terraform plan and merge AMI build + deploy workflow 
([#1514](https://github.com/sourcenetwork/defradb/issues/1514)) +* Reconfigure CodeCov action to ensure stability ([#1414](https://github.com/sourcenetwork/defradb/issues/1414)) + +### Chore + +* Bump to GoLang v1.20 ([#1689](https://github.com/sourcenetwork/defradb/issues/1689)) +* Update to ipfs boxo 0.10.0 ([#1573](https://github.com/sourcenetwork/defradb/issues/1573)) \ No newline at end of file diff --git a/docs/website/release notes/v0.7.0.md b/docs/website/release notes/v0.7.0.md new file mode 100644 index 0000000000..00ea8d5045 --- /dev/null +++ b/docs/website/release notes/v0.7.0.md @@ -0,0 +1,74 @@ +--- +sidebar_position: 70 +--- +# v0.7.0 + +> 2023-09-18 + +DefraDB v0.7 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +This release has focused on robustness, testing, and schema management. Some highlight new features include notable expansions to the expressiveness of schema migrations. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.6.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. 
+ +### Features + +* Allow field indexing by name in PatchSchema ([#1810](https://github.com/sourcenetwork/defradb/issues/1810)) +* Auto-create relation id fields via PatchSchema ([#1807](https://github.com/sourcenetwork/defradb/issues/1807)) +* Support PatchSchema relational field kind substitution ([#1777](https://github.com/sourcenetwork/defradb/issues/1777)) +* Add support for adding of relational fields ([#1766](https://github.com/sourcenetwork/defradb/issues/1766)) +* Enable downgrading of documents via Lens inverses ([#1721](https://github.com/sourcenetwork/defradb/issues/1721)) + +### Fixes + +* Correctly handle serialisation of nil field values ([#1872](https://github.com/sourcenetwork/defradb/issues/1872)) +* Compound filter operators with relations ([#1855](https://github.com/sourcenetwork/defradb/issues/1855)) +* Only update updated fields via update requests ([#1817](https://github.com/sourcenetwork/defradb/issues/1817)) +* Error when saving a deleted document ([#1806](https://github.com/sourcenetwork/defradb/issues/1806)) +* Prevent multiple docs from being linked in one one ([#1790](https://github.com/sourcenetwork/defradb/issues/1790)) +* Handle the querying of secondary relation id fields ([#1768](https://github.com/sourcenetwork/defradb/issues/1768)) +* Improve the way migrations handle transactions ([#1737](https://github.com/sourcenetwork/defradb/issues/1737)) + +### Tooling + +* Add Akash deployment configuration ([#1736](https://github.com/sourcenetwork/defradb/issues/1736)) + +### Refactoring + +* HTTP client interface ([#1776](https://github.com/sourcenetwork/defradb/issues/1776)) +* Simplify fetcher interface ([#1746](https://github.com/sourcenetwork/defradb/issues/1746)) + +### Testing + +* Convert and move out of place explain tests ([#1878](https://github.com/sourcenetwork/defradb/issues/1878)) +* Update mutation tests to make use of mutation system ([#1853](https://github.com/sourcenetwork/defradb/issues/1853)) +* Test top level agg. 
with compound relational filter ([#1870](https://github.com/sourcenetwork/defradb/issues/1870)) +* Skip unsupported mutation types at test level ([#1850](https://github.com/sourcenetwork/defradb/issues/1850)) +* Extend mutation tests with col.Update and Create ([#1838](https://github.com/sourcenetwork/defradb/issues/1838)) +* Add tests for multiple one-one joins ([#1793](https://github.com/sourcenetwork/defradb/issues/1793)) + +### Chore + +* Update Badger version to v4 ([#1740](https://github.com/sourcenetwork/defradb/issues/1740)) +* Update go-libp2p to 0.29.2 ([#1780](https://github.com/sourcenetwork/defradb/issues/1780)) +* Bump golangci-lint to v1.54 ([#1881](https://github.com/sourcenetwork/defradb/issues/1881)) +* Bump go.opentelemetry.io/otel/metric from 1.17.0 to 1.18.0 ([#1890](https://github.com/sourcenetwork/defradb/issues/1890)) +* Bump [@tanstack](https://github.com/tanstack)/react-query from 4.35.0 to 4.35.3 in /playground ([#1876](https://github.com/sourcenetwork/defradb/issues/1876)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.5.0 to 6.7.0 in /playground ([#1874](https://github.com/sourcenetwork/defradb/issues/1874)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.6.0 to 6.7.0 in /playground ([#1875](https://github.com/sourcenetwork/defradb/issues/1875)) +* Combined PRs 2023-09-14 ([#1873](https://github.com/sourcenetwork/defradb/issues/1873)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.4.0 to 6.5.0 in /playground ([#1827](https://github.com/sourcenetwork/defradb/issues/1827)) +* Bump go.opentelemetry.io/otel/sdk/metric from 0.39.0 to 0.40.0 ([#1829](https://github.com/sourcenetwork/defradb/issues/1829)) +* Bump github.com/ipfs/go-block-format from 0.1.2 to 0.2.0 ([#1819](https://github.com/sourcenetwork/defradb/issues/1819)) +* Combined PRs ([#1826](https://github.com/sourcenetwork/defradb/issues/1826)) +* Bump 
[@typescript](https://github.com/typescript)-eslint/parser from 6.4.0 to 6.4.1 in /playground ([#1804](https://github.com/sourcenetwork/defradb/issues/1804)) +* Combined PRs ([#1803](https://github.com/sourcenetwork/defradb/issues/1803)) +* Combined PRs ([#1791](https://github.com/sourcenetwork/defradb/issues/1791)) +* Combined PRs ([#1778](https://github.com/sourcenetwork/defradb/issues/1778)) +* Bump dependencies ([#1761](https://github.com/sourcenetwork/defradb/issues/1761)) +* Bump vite from 4.3.9 to 4.4.8 in /playground ([#1748](https://github.com/sourcenetwork/defradb/issues/1748)) +* Bump graphiql from 3.0.4 to 3.0.5 in /playground ([#1730](https://github.com/sourcenetwork/defradb/issues/1730)) +* Combined bumps of dependencies under /playground ([#1744](https://github.com/sourcenetwork/defradb/issues/1744)) +* Bump github.com/ipfs/boxo from 0.10.2 to 0.11.0 ([#1726](https://github.com/sourcenetwork/defradb/issues/1726)) +* Bump github.com/libp2p/go-libp2p-kad-dht from 0.24.2 to 0.24.3 ([#1724](https://github.com/sourcenetwork/defradb/issues/1724)) +* Bump google.golang.org/grpc from 1.56.2 to 1.57.0 ([#1725](https://github.com/sourcenetwork/defradb/issues/1725)) \ No newline at end of file diff --git a/docs/website/release notes/v0.8.0.md b/docs/website/release notes/v0.8.0.md new file mode 100644 index 0000000000..9ff4e854ea --- /dev/null +++ b/docs/website/release notes/v0.8.0.md @@ -0,0 +1,75 @@ +--- +sidebar_position: 80 +--- +# v0.8.0 + +> 2023-11-14 + +DefraDB v0.8 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.7.x databases. 
If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Add means to fetch schema ([#2006](https://github.com/sourcenetwork/defradb/issues/2006)) +* Rename Schema.SchemaID to Schema.Root ([#2005](https://github.com/sourcenetwork/defradb/issues/2005)) +* Enable playground in Docker build ([#1986](https://github.com/sourcenetwork/defradb/issues/1986)) +* Change GetCollectionBySchemaFoo funcs to return many ([#1984](https://github.com/sourcenetwork/defradb/issues/1984)) +* Add Swagger UI to playground ([#1979](https://github.com/sourcenetwork/defradb/issues/1979)) +* Add OpenAPI route ([#1960](https://github.com/sourcenetwork/defradb/issues/1960)) +* Remove CollectionDescription.Schema ([#1965](https://github.com/sourcenetwork/defradb/issues/1965)) +* Remove collection from patch schema ([#1957](https://github.com/sourcenetwork/defradb/issues/1957)) +* Make queries utilise secondary indexes ([#1925](https://github.com/sourcenetwork/defradb/issues/1925)) +* Allow setting of default schema version ([#1888](https://github.com/sourcenetwork/defradb/issues/1888)) +* Add CCIP Support ([#1896](https://github.com/sourcenetwork/defradb/issues/1896)) + +### Fixes + +* Fix test module relying on closed memory leak ([#2037](https://github.com/sourcenetwork/defradb/issues/2037)) +* Make return type for FieldKind_INT an int64 ([#1982](https://github.com/sourcenetwork/defradb/issues/1982)) +* Node private key requires data directory ([#1938](https://github.com/sourcenetwork/defradb/issues/1938)) +* Remove collection name from schema ID generation ([#1920](https://github.com/sourcenetwork/defradb/issues/1920)) +* Infinite loop when updating one-one relation ([#1915](https://github.com/sourcenetwork/defradb/issues/1915)) + +### Refactoring + +* CRDT merge direction ([#2016](https://github.com/sourcenetwork/defradb/issues/2016)) +* Reorganise collection 
description storage ([#1988](https://github.com/sourcenetwork/defradb/issues/1988)) +* Add peerstore to multistore ([#1980](https://github.com/sourcenetwork/defradb/issues/1980)) +* P2P client interface ([#1924](https://github.com/sourcenetwork/defradb/issues/1924)) +* Deprecate CollectionDescription.Schema ([#1939](https://github.com/sourcenetwork/defradb/issues/1939)) +* Remove net GRPC API ([#1927](https://github.com/sourcenetwork/defradb/issues/1927)) +* CLI client interface ([#1839](https://github.com/sourcenetwork/defradb/issues/1839)) + +### Continuous integration + +* Add goreleaser workflow ([#2040](https://github.com/sourcenetwork/defradb/issues/2040)) +* Add mac test runner ([#2035](https://github.com/sourcenetwork/defradb/issues/2035)) +* Parallelize change detector ([#1871](https://github.com/sourcenetwork/defradb/issues/1871)) + +### Chore + +* Update dependencies ([#2044](https://github.com/sourcenetwork/defradb/issues/2044)) + +### Bot + +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.10.0 to 6.11.0 in /playground ([#2053](https://github.com/sourcenetwork/defradb/issues/2053)) +* Update dependencies (bulk dependabot PRs) 13-11-2023 ([#2052](https://github.com/sourcenetwork/defradb/issues/2052)) +* Bump axios from 1.5.1 to 1.6.1 in /playground ([#2041](https://github.com/sourcenetwork/defradb/issues/2041)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.9.1 to 6.10.0 in /playground ([#2042](https://github.com/sourcenetwork/defradb/issues/2042)) +* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.4.0 to 3.4.1 in /playground ([#2022](https://github.com/sourcenetwork/defradb/issues/2022)) +* Update dependencies (bulk dependabot PRs) 08-11-2023 ([#2038](https://github.com/sourcenetwork/defradb/issues/2038)) +* Update dependencies (bulk dependabot PRs) 30-10-2023 ([#2015](https://github.com/sourcenetwork/defradb/issues/2015)) +* Bump eslint-plugin and parser from 6.8.0 to 6.9.0 in 
/playground ([#2000](https://github.com/sourcenetwork/defradb/issues/2000)) +* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1998](https://github.com/sourcenetwork/defradb/issues/1998)) +* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1976](https://github.com/sourcenetwork/defradb/issues/1976)) +* Bump golang.org/x/net from 0.16.0 to 0.17.0 ([#1961](https://github.com/sourcenetwork/defradb/issues/1961)) +* Bump [@types](https://github.com/types)/react-dom from 18.2.11 to 18.2.12 in /playground ([#1952](https://github.com/sourcenetwork/defradb/issues/1952)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.7.4 to 6.7.5 in /playground ([#1953](https://github.com/sourcenetwork/defradb/issues/1953)) +* Bump combined dependencies 09-10-2023 ([#1951](https://github.com/sourcenetwork/defradb/issues/1951)) +* Bump [@types](https://github.com/types)/react from 18.2.24 to 18.2.25 in /playground ([#1932](https://github.com/sourcenetwork/defradb/issues/1932)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.7.3 to 6.7.4 in /playground ([#1933](https://github.com/sourcenetwork/defradb/issues/1933)) +* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.3.2 to 3.4.0 in /playground ([#1904](https://github.com/sourcenetwork/defradb/issues/1904)) +* Bump combined dependencies 19-09-2023 ([#1931](https://github.com/sourcenetwork/defradb/issues/1931)) +* Bump graphql from 16.8.0 to 16.8.1 in /playground ([#1901](https://github.com/sourcenetwork/defradb/issues/1901)) +* Update combined dependabot PRs 19-09-2023 ([#1898](https://github.com/sourcenetwork/defradb/issues/1898)) \ No newline at end of file diff --git a/docs/website/release notes/v0.9.0.md b/docs/website/release notes/v0.9.0.md new file mode 100644 index 0000000000..71a60885f2 --- /dev/null +++ b/docs/website/release notes/v0.9.0.md @@ -0,0 +1,78 @@ +--- +sidebar_position: 90 +--- +# v0.9.0 + +> 2024-01-18 + +DefraDB v0.9 is a major 
pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.8.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Mutation typed input ([#2167](https://github.com/sourcenetwork/defradb/issues/2167)) +* Add PN Counter CRDT type ([#2119](https://github.com/sourcenetwork/defradb/issues/2119)) +* Allow users to add Views ([#2114](https://github.com/sourcenetwork/defradb/issues/2114)) +* Add unique secondary index ([#2131](https://github.com/sourcenetwork/defradb/issues/2131)) +* New cmd for docs auto generation ([#2096](https://github.com/sourcenetwork/defradb/issues/2096)) +* Add blob scalar type ([#2091](https://github.com/sourcenetwork/defradb/issues/2091)) + +### Fixes + +* Add entropy to counter CRDT type updates ([#2186](https://github.com/sourcenetwork/defradb/issues/2186)) +* Handle multiple nil values on unique indexed fields ([#2178](https://github.com/sourcenetwork/defradb/issues/2178)) +* Filtering on unique index if there is no match ([#2177](https://github.com/sourcenetwork/defradb/issues/2177)) + +### Performance + +* Switch LensVM to wasmtime runtime ([#2030](https://github.com/sourcenetwork/defradb/issues/2030)) + +### Refactoring + +* Add strong typing to document creation ([#2161](https://github.com/sourcenetwork/defradb/issues/2161)) +* Rename key,id,dockey to docID terminology ([#1749](https://github.com/sourcenetwork/defradb/issues/1749)) +* Simplify Merkle CRDT workflow ([#2111](https://github.com/sourcenetwork/defradb/issues/2111)) + +### Testing + +* Add auto-doc generation 
([#2051](https://github.com/sourcenetwork/defradb/issues/2051)) + +### Continuous integration + +* Add windows test runner ([#2033](https://github.com/sourcenetwork/defradb/issues/2033)) + +### Chore + +* Update Lens to v0.5 ([#2083](https://github.com/sourcenetwork/defradb/issues/2083)) + +### Bot + +* Bump [@types](https://github.com/types)/react from 18.2.47 to 18.2.48 in /playground ([#2213](https://github.com/sourcenetwork/defradb/issues/2213)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.18.0 to 6.18.1 in /playground ([#2215](https://github.com/sourcenetwork/defradb/issues/2215)) +* Update dependencies (bulk dependabot PRs) 15-01-2024 ([#2217](https://github.com/sourcenetwork/defradb/issues/2217)) +* Bump follow-redirects from 1.15.3 to 1.15.4 in /playground ([#2181](https://github.com/sourcenetwork/defradb/issues/2181)) +* Bump github.com/getkin/kin-openapi from 0.120.0 to 0.122.0 ([#2097](https://github.com/sourcenetwork/defradb/issues/2097)) +* Update dependencies (bulk dependabot PRs) 08-01-2024 ([#2173](https://github.com/sourcenetwork/defradb/issues/2173)) +* Bump github.com/bits-and-blooms/bitset from 1.12.0 to 1.13.0 ([#2160](https://github.com/sourcenetwork/defradb/issues/2160)) +* Bump [@types](https://github.com/types)/react from 18.2.45 to 18.2.46 in /playground ([#2159](https://github.com/sourcenetwork/defradb/issues/2159)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.15.0 to 6.16.0 in /playground ([#2156](https://github.com/sourcenetwork/defradb/issues/2156)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.15.0 to 6.16.0 in /playground ([#2155](https://github.com/sourcenetwork/defradb/issues/2155)) +* Update dependencies (bulk dependabot PRs) 27-12-2023 ([#2154](https://github.com/sourcenetwork/defradb/issues/2154)) +* Bump github.com/spf13/viper from 1.17.0 to 1.18.2 ([#2145](https://github.com/sourcenetwork/defradb/issues/2145)) +* Bump 
golang.org/x/crypto from 0.16.0 to 0.17.0 ([#2144](https://github.com/sourcenetwork/defradb/issues/2144)) +* Update dependencies (bulk dependabot PRs) 18-12-2023 ([#2142](https://github.com/sourcenetwork/defradb/issues/2142)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.13.2 to 6.14.0 in /playground ([#2136](https://github.com/sourcenetwork/defradb/issues/2136)) +* Bump [@types](https://github.com/types)/react from 18.2.43 to 18.2.45 in /playground ([#2134](https://github.com/sourcenetwork/defradb/issues/2134)) +* Bump vite from 5.0.7 to 5.0.10 in /playground ([#2135](https://github.com/sourcenetwork/defradb/issues/2135)) +* Update dependencies (bulk dependabot PRs) 04-12-2023 ([#2133](https://github.com/sourcenetwork/defradb/issues/2133)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.13.1 to 6.13.2 in /playground ([#2109](https://github.com/sourcenetwork/defradb/issues/2109)) +* Bump vite from 5.0.2 to 5.0.5 in /playground ([#2112](https://github.com/sourcenetwork/defradb/issues/2112)) +* Bump [@types](https://github.com/types)/react from 18.2.41 to 18.2.42 in /playground ([#2108](https://github.com/sourcenetwork/defradb/issues/2108)) +* Update dependencies (bulk dependabot PRs) 04-12-2023 ([#2107](https://github.com/sourcenetwork/defradb/issues/2107)) +* Bump [@types](https://github.com/types)/react from 18.2.38 to 18.2.39 in /playground ([#2086](https://github.com/sourcenetwork/defradb/issues/2086)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.12.0 to 6.13.0 in /playground ([#2085](https://github.com/sourcenetwork/defradb/issues/2085)) +* Update dependencies (bulk dependabot PRs) 27-11-2023 ([#2081](https://github.com/sourcenetwork/defradb/issues/2081)) +* Bump swagger-ui-react from 5.10.0 to 5.10.3 in /playground ([#2067](https://github.com/sourcenetwork/defradb/issues/2067)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.11.0 to 6.12.0 
in /playground ([#2068](https://github.com/sourcenetwork/defradb/issues/2068)) +* Update dependencies (bulk dependabot PRs) 20-11-2023 ([#2066](https://github.com/sourcenetwork/defradb/issues/2066)) \ No newline at end of file From 9f2625cad15f4b81ffed6c6cc13300ec76d48009 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Wed, 8 May 2024 14:06:40 -0400 Subject: [PATCH 03/78] refactor: Move internal packages to internal dir (#2599) ## Relevant issue(s) Resolves #2507 ## Description Moves our internal packages to an internal directory. No code has been changed, only moved, and import paths have been updated. --- cli/server_dump.go | 2 +- cli/start.go | 2 +- cli/utils.go | 4 ++-- client/document.go | 2 +- client/document_test.go | 2 +- datastore/blockstore_test.go | 2 +- http/handler_ccip_test.go | 2 +- http/http_client.go | 2 +- http/middleware.go | 4 ++-- {acp => internal/acp}/README.md | 0 {acp => internal/acp}/acp.go | 0 {acp => internal/acp}/acp_local.go | 0 {acp => internal/acp}/acp_local_test.go | 0 {acp => internal/acp}/doc.go | 0 {acp => internal/acp}/dpi.go | 0 {acp => internal/acp}/errors.go | 0 {acp => internal/acp}/identity/identity.go | 0 {connor => internal/connor}/LICENSE | 0 {connor => internal/connor}/and.go | 0 {connor => internal/connor}/connor.go | 0 {connor => internal/connor}/eq.go | 6 +++--- {connor => internal/connor}/errors.go | 0 {connor => internal/connor}/ge.go | 2 +- {connor => internal/connor}/gt.go | 2 +- {connor => internal/connor}/ilike.go | 0 {connor => internal/connor}/ilike_test.go | 0 {connor => internal/connor}/in.go | 0 {connor => internal/connor}/key.go | 0 {connor => internal/connor}/le.go | 2 +- {connor => internal/connor}/like.go | 0 {connor => internal/connor}/like_test.go | 0 {connor => internal/connor}/lt.go | 2 +- {connor => internal/connor}/ne.go | 0 {connor => internal/connor}/nilike.go | 0 {connor => internal/connor}/nilike_test.go | 0 {connor => internal/connor}/nin.go | 0 {connor => internal/connor}/nlike.go | 0 
{connor => internal/connor}/nlike_test.go | 0 {connor => internal/connor}/not.go | 0 {connor => internal/connor}/not_test.go | 0 {connor => internal/connor}/numbers/equality.go | 0 {connor => internal/connor}/numbers/upcast.go | 0 {connor => internal/connor}/or.go | 0 {connor => internal/connor}/time/equality.go | 0 {core => internal/core}/cid/cid.go | 0 {core => internal/core}/clock.go | 0 {core => internal/core}/crdt/README.md | 0 {core => internal/core}/crdt/base.go | 2 +- {core => internal/core}/crdt/base_test.go | 2 +- {core => internal/core}/crdt/composite.go | 4 ++-- {core => internal/core}/crdt/counter.go | 4 ++-- {core => internal/core}/crdt/doc.go | 0 {core => internal/core}/crdt/errors.go | 0 {core => internal/core}/crdt/lwwreg.go | 4 ++-- {core => internal/core}/crdt/lwwreg_test.go | 2 +- {core => internal/core}/data.go | 0 {core => internal/core}/data_test.go | 0 {core => internal/core}/delta.go | 0 {core => internal/core}/doc.go | 0 {core => internal/core}/encoding.go | 2 +- {core => internal/core}/errors.go | 0 {core => internal/core}/key.go | 0 {core => internal/core}/key_test.go | 2 +- {core => internal/core}/net/broadcaster.go | 0 {core => internal/core}/net/protocol.go | 0 {core => internal/core}/node.go | 0 {core => internal/core}/parser.go | 0 {core => internal/core}/replicated.go | 0 {core => internal/core}/type.go | 0 {db => internal/db}/backup.go | 0 {db => internal/db}/backup_test.go | 2 +- {db => internal/db}/base/collection_keys.go | 2 +- {db => internal/db}/base/compare.go | 0 {db => internal/db}/base/descriptions.go | 0 {db => internal/db}/base/encoding.go | 0 {db => internal/db}/base/errors.go | 0 {db => internal/db}/collection.go | 14 +++++++------- {db => internal/db}/collection_acp.go | 4 ++-- {db => internal/db}/collection_delete.go | 6 +++--- {db => internal/db}/collection_get.go | 6 +++--- {db => internal/db}/collection_index.go | 10 +++++----- {db => internal/db}/collection_test.go | 0 {db => internal/db}/collection_update.go | 
2 +- {db => internal/db}/config.go | 2 +- {db => internal/db}/config_test.go | 0 {db => internal/db}/container/container.go | 2 +- {db => internal/db}/context.go | 2 +- {db => internal/db}/context_test.go | 0 {db => internal/db}/db.go | 8 ++++---- {db => internal/db}/db_test.go | 0 {db => internal/db}/description/collection.go | 2 +- {db => internal/db}/description/errors.go | 0 {db => internal/db}/description/schema.go | 4 ++-- {db => internal/db}/errors.go | 0 {db => internal/db}/fetcher/dag.go | 2 +- {db => internal/db}/fetcher/encoded_doc.go | 2 +- {db => internal/db}/fetcher/errors.go | 0 {db => internal/db}/fetcher/fetcher.go | 14 +++++++------- {db => internal/db}/fetcher/indexer.go | 10 +++++----- {db => internal/db}/fetcher/indexer_iterators.go | 6 +++--- .../db}/fetcher/mocks/encoded_document.go | 0 {db => internal/db}/fetcher/mocks/fetcher.go | 10 +++++----- {db => internal/db}/fetcher/mocks/utils.go | 0 {db => internal/db}/fetcher/versioned.go | 12 ++++++------ {db => internal/db}/fetcher_test.go | 4 ++-- {db => internal/db}/index.go | 4 ++-- {db => internal/db}/index_test.go | 4 ++-- {db => internal/db}/indexed_docs_test.go | 12 ++++++------ {db => internal/db}/lens.go | 4 ++-- {db => internal/db}/permission/check.go | 4 ++-- {db => internal/db}/permission/permission.go | 0 {db => internal/db}/permission/register.go | 4 ++-- {db => internal/db}/request.go | 2 +- {db => internal/db}/schema.go | 2 +- {db => internal/db}/sequence.go | 2 +- {db => internal/db}/store.go | 0 {db => internal/db}/subscriptions.go | 2 +- {db => internal/db}/view.go | 2 +- {encoding => internal/encoding}/bytes.go | 0 {encoding => internal/encoding}/bytes_test.go | 0 {encoding => internal/encoding}/encoding.go | 0 {encoding => internal/encoding}/errors.go | 0 {encoding => internal/encoding}/field_value.go | 0 .../encoding}/field_value_test.go | 0 {encoding => internal/encoding}/float.go | 0 {encoding => internal/encoding}/float_test.go | 0 {encoding => internal/encoding}/int.go | 
0 {encoding => internal/encoding}/int_test.go | 0 {encoding => internal/encoding}/null.go | 0 {encoding => internal/encoding}/null_test.go | 0 {encoding => internal/encoding}/string.go | 0 {encoding => internal/encoding}/string_test.go | 0 {encoding => internal/encoding}/type.go | 0 {encoding => internal/encoding}/type_test.go | 0 {lens => internal/lens}/fetcher.go | 10 +++++----- {lens => internal/lens}/history.go | 2 +- {lens => internal/lens}/lens.go | 0 {lens => internal/lens}/registry.go | 2 +- {lens => internal/lens}/txn_registry.go | 0 {merkle => internal/merkle}/clock/clock.go | 2 +- {merkle => internal/merkle}/clock/clock_test.go | 6 +++--- {merkle => internal/merkle}/clock/errors.go | 0 {merkle => internal/merkle}/clock/heads.go | 2 +- {merkle => internal/merkle}/clock/heads_test.go | 4 ++-- {merkle => internal/merkle}/clock/ipld.go | 2 +- {merkle => internal/merkle}/clock/ipld_test.go | 0 {merkle => internal/merkle}/crdt/composite.go | 6 +++--- {merkle => internal/merkle}/crdt/counter.go | 6 +++--- {merkle => internal/merkle}/crdt/errors.go | 0 {merkle => internal/merkle}/crdt/lwwreg.go | 6 +++--- {merkle => internal/merkle}/crdt/merklecrdt.go | 2 +- .../merkle}/crdt/merklecrdt_test.go | 6 +++--- {merkle => internal/merkle}/doc.go | 0 {metric => internal/metric}/metric.go | 0 {metric => internal/metric}/metric_test.go | 0 {planner => internal/planner}/arbitrary_join.go | 4 ++-- {planner => internal/planner}/average.go | 4 ++-- {planner => internal/planner}/commit.go | 6 +++--- {planner => internal/planner}/count.go | 4 ++-- {planner => internal/planner}/create.go | 6 +++--- {planner => internal/planner}/datasource.go | 2 +- {planner => internal/planner}/delete.go | 4 ++-- {planner => internal/planner}/doc.go | 0 {planner => internal/planner}/errors.go | 0 {planner => internal/planner}/explain.go | 0 {planner => internal/planner}/filter/complex.go | 4 ++-- .../planner}/filter/complex_test.go | 2 +- {planner => internal/planner}/filter/copy.go | 2 +- 
{planner => internal/planner}/filter/copy_field.go | 4 ++-- .../planner}/filter/copy_field_test.go | 4 ++-- {planner => internal/planner}/filter/copy_test.go | 4 ++-- .../planner}/filter/extract_properties.go | 4 ++-- .../planner}/filter/extract_properties_test.go | 2 +- {planner => internal/planner}/filter/merge.go | 4 ++-- {planner => internal/planner}/filter/merge_test.go | 4 ++-- {planner => internal/planner}/filter/normalize.go | 4 ++-- .../planner}/filter/normalize_test.go | 2 +- .../planner}/filter/remove_field.go | 2 +- .../planner}/filter/remove_field_test.go | 4 ++-- {planner => internal/planner}/filter/split.go | 2 +- {planner => internal/planner}/filter/split_test.go | 2 +- .../planner}/filter/unwrap_relation.go | 4 ++-- .../planner}/filter/unwrap_relation_test.go | 4 ++-- {planner => internal/planner}/filter/util_test.go | 6 +++--- {planner => internal/planner}/group.go | 4 ++-- {planner => internal/planner}/lens.go | 2 +- {planner => internal/planner}/limit.go | 4 ++-- {planner => internal/planner}/mapper/aggregate.go | 2 +- .../planner}/mapper/commitSelect.go | 0 {planner => internal/planner}/mapper/errors.go | 0 {planner => internal/planner}/mapper/field.go | 0 {planner => internal/planner}/mapper/mapper.go | 4 ++-- {planner => internal/planner}/mapper/mutation.go | 0 .../planner}/mapper/requestable.go | 0 {planner => internal/planner}/mapper/select.go | 2 +- {planner => internal/planner}/mapper/targetable.go | 4 ++-- {planner => internal/planner}/multi.go | 2 +- {planner => internal/planner}/operations.go | 0 {planner => internal/planner}/order.go | 4 ++-- {planner => internal/planner}/pipe.go | 4 ++-- {planner => internal/planner}/plan.go | 0 {planner => internal/planner}/planner.go | 12 ++++++------ {planner => internal/planner}/scan.go | 14 +++++++------- {planner => internal/planner}/select.go | 8 ++++---- {planner => internal/planner}/sum.go | 4 ++-- {planner => internal/planner}/top.go | 4 ++-- {planner => internal/planner}/type_join.go | 10 
+++++----- {planner => internal/planner}/type_join.md | 0 {planner => internal/planner}/update.go | 4 ++-- {planner => internal/planner}/values.go | 8 ++++---- {planner => internal/planner}/view.go | 4 ++-- {request => internal/request}/doc.go | 0 {request => internal/request}/graphql/parser.go | 6 +++--- .../request}/graphql/parser/commit.go | 2 +- .../request}/graphql/parser/doc.go | 0 .../request}/graphql/parser/errors.go | 0 .../request}/graphql/parser/filter.go | 0 .../request}/graphql/parser/introspection.go | 0 .../request}/graphql/parser/mutation.go | 0 .../request}/graphql/parser/parser.go | 2 +- .../request}/graphql/parser/query.go | 0 .../request}/graphql/parser/request.go | 2 +- .../request}/graphql/parser/subscription.go | 0 .../request}/graphql/schema/collection.go | 2 +- .../request}/graphql/schema/descriptions.go | 2 +- .../request}/graphql/schema/descriptions_test.go | 0 .../request}/graphql/schema/doc.go | 0 .../request}/graphql/schema/errors.go | 0 .../graphql/schema/examples/example.schema.gql | 0 .../graphql/schema/examples/root.schema.gql | 0 .../request}/graphql/schema/generate.go | 2 +- .../request}/graphql/schema/index_parse_test.go | 0 .../request}/graphql/schema/manager.go | 2 +- .../request}/graphql/schema/manager_test.go | 0 .../graphql/schema/type.schema.gen.gql.template | 0 .../request}/graphql/schema/types/base.go | 0 .../request}/graphql/schema/types/commits.go | 0 .../request}/graphql/schema/types/descriptions.go | 0 .../request}/graphql/schema/types/scalars.go | 0 .../request}/graphql/schema/types/scalars_test.go | 0 .../request}/graphql/schema/types/types.go | 0 net/dag_test.go | 4 ++-- net/dialer.go | 2 +- net/node_test.go | 2 +- net/peer.go | 6 +++--- net/peer_collection.go | 4 ++-- net/peer_replicator.go | 4 ++-- net/peer_test.go | 6 +++--- net/process.go | 6 +++--- net/server.go | 4 ++-- node/node.go | 2 +- node/node_test.go | 2 +- tests/bench/query/planner/utils.go | 12 ++++++------ tests/clients/cli/wrapper_cli.go | 2 +- 
tests/gen/cli/util_test.go | 2 +- tests/gen/schema_parser.go | 2 +- tests/integration/acp.go | 4 ++-- tests/integration/db.go | 2 +- .../events/simple/with_create_txn_test.go | 2 +- tests/integration/events/utils.go | 2 +- tests/integration/index/create_test.go | 2 +- .../index/create_unique_composite_test.go | 2 +- tests/integration/index/create_unique_test.go | 2 +- tests/integration/lens.go | 2 +- tests/integration/net/order/utils.go | 2 +- tests/integration/schema/client_test.go | 2 +- tests/integration/utils2.go | 6 +++--- tests/predefined/gen_predefined.go | 2 +- tools/configs/mockery.yaml | 4 ++-- version/version.go | 2 +- 270 files changed, 296 insertions(+), 296 deletions(-) rename {acp => internal/acp}/README.md (100%) rename {acp => internal/acp}/acp.go (100%) rename {acp => internal/acp}/acp_local.go (100%) rename {acp => internal/acp}/acp_local_test.go (100%) rename {acp => internal/acp}/doc.go (100%) rename {acp => internal/acp}/dpi.go (100%) rename {acp => internal/acp}/errors.go (100%) rename {acp => internal/acp}/identity/identity.go (100%) rename {connor => internal/connor}/LICENSE (100%) rename {connor => internal/connor}/and.go (100%) rename {connor => internal/connor}/connor.go (100%) rename {connor => internal/connor}/eq.go (90%) rename {connor => internal/connor}/errors.go (100%) rename {connor => internal/connor}/ge.go (95%) rename {connor => internal/connor}/gt.go (94%) rename {connor => internal/connor}/ilike.go (100%) rename {connor => internal/connor}/ilike_test.go (100%) rename {connor => internal/connor}/in.go (100%) rename {connor => internal/connor}/key.go (100%) rename {connor => internal/connor}/le.go (95%) rename {connor => internal/connor}/like.go (100%) rename {connor => internal/connor}/like_test.go (100%) rename {connor => internal/connor}/lt.go (94%) rename {connor => internal/connor}/ne.go (100%) rename {connor => internal/connor}/nilike.go (100%) rename {connor => internal/connor}/nilike_test.go (100%) rename {connor => 
internal/connor}/nin.go (100%) rename {connor => internal/connor}/nlike.go (100%) rename {connor => internal/connor}/nlike_test.go (100%) rename {connor => internal/connor}/not.go (100%) rename {connor => internal/connor}/not_test.go (100%) rename {connor => internal/connor}/numbers/equality.go (100%) rename {connor => internal/connor}/numbers/upcast.go (100%) rename {connor => internal/connor}/or.go (100%) rename {connor => internal/connor}/time/equality.go (100%) rename {core => internal/core}/cid/cid.go (100%) rename {core => internal/core}/clock.go (100%) rename {core => internal/core}/crdt/README.md (100%) rename {core => internal/core}/crdt/base.go (97%) rename {core => internal/core}/crdt/base_test.go (97%) rename {core => internal/core}/crdt/composite.go (98%) rename {core => internal/core}/crdt/counter.go (98%) rename {core => internal/core}/crdt/doc.go (100%) rename {core => internal/core}/crdt/errors.go (100%) rename {core => internal/core}/crdt/lwwreg.go (97%) rename {core => internal/core}/crdt/lwwreg_test.go (99%) rename {core => internal/core}/data.go (100%) rename {core => internal/core}/data_test.go (100%) rename {core => internal/core}/delta.go (100%) rename {core => internal/core}/doc.go (100%) rename {core => internal/core}/encoding.go (99%) rename {core => internal/core}/errors.go (100%) rename {core => internal/core}/key.go (100%) rename {core => internal/core}/key_test.go (99%) rename {core => internal/core}/net/broadcaster.go (100%) rename {core => internal/core}/net/protocol.go (100%) rename {core => internal/core}/node.go (100%) rename {core => internal/core}/parser.go (100%) rename {core => internal/core}/replicated.go (100%) rename {core => internal/core}/type.go (100%) rename {db => internal/db}/backup.go (100%) rename {db => internal/db}/backup_test.go (99%) rename {db => internal/db}/base/collection_keys.go (97%) rename {db => internal/db}/base/compare.go (100%) rename {db => internal/db}/base/descriptions.go (100%) rename {db => 
internal/db}/base/encoding.go (100%) rename {db => internal/db}/base/errors.go (100%) rename {db => internal/db}/collection.go (99%) rename {db => internal/db}/collection_acp.go (93%) rename {db => internal/db}/collection_delete.go (96%) rename {db => internal/db}/collection_get.go (93%) rename {db => internal/db}/collection_index.go (98%) rename {db => internal/db}/collection_test.go (100%) rename {db => internal/db}/collection_update.go (99%) rename {db => internal/db}/config.go (97%) rename {db => internal/db}/config_test.go (100%) rename {db => internal/db}/container/container.go (97%) rename {db => internal/db}/context.go (98%) rename {db => internal/db}/context_test.go (100%) rename {db => internal/db}/db.go (97%) rename {db => internal/db}/db_test.go (100%) rename {db => internal/db}/description/collection.go (99%) rename {db => internal/db}/description/errors.go (100%) rename {db => internal/db}/description/schema.go (98%) rename {db => internal/db}/errors.go (100%) rename {db => internal/db}/fetcher/dag.go (97%) rename {db => internal/db}/fetcher/encoded_doc.go (99%) rename {db => internal/db}/fetcher/errors.go (100%) rename {db => internal/db}/fetcher/fetcher.go (97%) rename {db => internal/db}/fetcher/indexer.go (94%) rename {db => internal/db}/fetcher/indexer_iterators.go (99%) rename {db => internal/db}/fetcher/mocks/encoded_document.go (100%) rename {db => internal/db}/fetcher/mocks/fetcher.go (96%) rename {db => internal/db}/fetcher/mocks/utils.go (100%) rename {db => internal/db}/fetcher/versioned.go (97%) rename {db => internal/db}/fetcher_test.go (85%) rename {db => internal/db}/index.go (98%) rename {db => internal/db}/index_test.go (99%) rename {db => internal/db}/indexed_docs_test.go (99%) rename {db => internal/db}/lens.go (97%) rename {db => internal/db}/permission/check.go (95%) rename {db => internal/db}/permission/permission.go (100%) rename {db => internal/db}/permission/register.go (92%) rename {db => internal/db}/request.go (96%) rename 
{db => internal/db}/schema.go (99%) rename {db => internal/db}/sequence.go (96%) rename {db => internal/db}/store.go (100%) rename {db => internal/db}/subscriptions.go (97%) rename {db => internal/db}/view.go (97%) rename {encoding => internal/encoding}/bytes.go (100%) rename {encoding => internal/encoding}/bytes_test.go (100%) rename {encoding => internal/encoding}/encoding.go (100%) rename {encoding => internal/encoding}/errors.go (100%) rename {encoding => internal/encoding}/field_value.go (100%) rename {encoding => internal/encoding}/field_value_test.go (100%) rename {encoding => internal/encoding}/float.go (100%) rename {encoding => internal/encoding}/float_test.go (100%) rename {encoding => internal/encoding}/int.go (100%) rename {encoding => internal/encoding}/int_test.go (100%) rename {encoding => internal/encoding}/null.go (100%) rename {encoding => internal/encoding}/null_test.go (100%) rename {encoding => internal/encoding}/string.go (100%) rename {encoding => internal/encoding}/string_test.go (100%) rename {encoding => internal/encoding}/type.go (100%) rename {encoding => internal/encoding}/type_test.go (100%) rename {lens => internal/lens}/fetcher.go (97%) rename {lens => internal/lens}/history.go (99%) rename {lens => internal/lens}/lens.go (100%) rename {lens => internal/lens}/registry.go (99%) rename {lens => internal/lens}/txn_registry.go (100%) rename {merkle => internal/merkle}/clock/clock.go (98%) rename {merkle => internal/merkle}/clock/clock_test.go (96%) rename {merkle => internal/merkle}/clock/errors.go (100%) rename {merkle => internal/merkle}/clock/heads.go (98%) rename {merkle => internal/merkle}/clock/heads_test.go (96%) rename {merkle => internal/merkle}/clock/ipld.go (98%) rename {merkle => internal/merkle}/clock/ipld_test.go (100%) rename {merkle => internal/merkle}/crdt/composite.go (92%) rename {merkle => internal/merkle}/crdt/counter.go (91%) rename {merkle => internal/merkle}/crdt/errors.go (100%) rename {merkle => 
internal/merkle}/crdt/lwwreg.go (91%) rename {merkle => internal/merkle}/crdt/merklecrdt.go (98%) rename {merkle => internal/merkle}/crdt/merklecrdt_test.go (89%) rename {merkle => internal/merkle}/doc.go (100%) rename {metric => internal/metric}/metric.go (100%) rename {metric => internal/metric}/metric_test.go (100%) rename {planner => internal/planner}/arbitrary_join.go (98%) rename {planner => internal/planner}/average.go (96%) rename {planner => internal/planner}/commit.go (98%) rename {planner => internal/planner}/count.go (98%) rename {planner => internal/planner}/create.go (96%) rename {planner => internal/planner}/datasource.go (95%) rename {planner => internal/planner}/delete.go (96%) rename {planner => internal/planner}/doc.go (100%) rename {planner => internal/planner}/errors.go (100%) rename {planner => internal/planner}/explain.go (100%) rename {planner => internal/planner}/filter/complex.go (93%) rename {planner => internal/planner}/filter/complex_test.go (98%) rename {planner => internal/planner}/filter/copy.go (95%) rename {planner => internal/planner}/filter/copy_field.go (95%) rename {planner => internal/planner}/filter/copy_field_test.go (97%) rename {planner => internal/planner}/filter/copy_test.go (97%) rename {planner => internal/planner}/filter/extract_properties.go (95%) rename {planner => internal/planner}/filter/extract_properties_test.go (98%) rename {planner => internal/planner}/filter/merge.go (91%) rename {planner => internal/planner}/filter/merge_test.go (94%) rename {planner => internal/planner}/filter/normalize.go (98%) rename {planner => internal/planner}/filter/normalize_test.go (99%) rename {planner => internal/planner}/filter/remove_field.go (93%) rename {planner => internal/planner}/filter/remove_field_test.go (97%) rename {planner => internal/planner}/filter/split.go (96%) rename {planner => internal/planner}/filter/split_test.go (98%) rename {planner => internal/planner}/filter/unwrap_relation.go (94%) rename {planner => 
internal/planner}/filter/unwrap_relation_test.go (96%) rename {planner => internal/planner}/filter/util_test.go (96%) rename {planner => internal/planner}/group.go (98%) rename {planner => internal/planner}/lens.go (98%) rename {planner => internal/planner}/limit.go (95%) rename {planner => internal/planner}/mapper/aggregate.go (97%) rename {planner => internal/planner}/mapper/commitSelect.go (100%) rename {planner => internal/planner}/mapper/errors.go (100%) rename {planner => internal/planner}/mapper/field.go (100%) rename {planner => internal/planner}/mapper/mapper.go (99%) rename {planner => internal/planner}/mapper/mutation.go (100%) rename {planner => internal/planner}/mapper/requestable.go (100%) rename {planner => internal/planner}/mapper/select.go (97%) rename {planner => internal/planner}/mapper/targetable.go (98%) rename {planner => internal/planner}/multi.go (99%) rename {planner => internal/planner}/operations.go (100%) rename {planner => internal/planner}/order.go (98%) rename {planner => internal/planner}/pipe.go (95%) rename {planner => internal/planner}/plan.go (100%) rename {planner => internal/planner}/planner.go (97%) rename {planner => internal/planner}/scan.go (95%) rename {planner => internal/planner}/select.go (98%) rename {planner => internal/planner}/sum.go (98%) rename {planner => internal/planner}/top.go (97%) rename {planner => internal/planner}/type_join.go (98%) rename {planner => internal/planner}/type_join.md (100%) rename {planner => internal/planner}/update.go (97%) rename {planner => internal/planner}/values.go (94%) rename {planner => internal/planner}/view.go (96%) rename {request => internal/request}/doc.go (100%) rename {request => internal/request}/graphql/parser.go (94%) rename {request => internal/request}/graphql/parser/commit.go (98%) rename {request => internal/request}/graphql/parser/doc.go (100%) rename {request => internal/request}/graphql/parser/errors.go (100%) rename {request => 
internal/request}/graphql/parser/filter.go (100%) rename {request => internal/request}/graphql/parser/introspection.go (100%) rename {request => internal/request}/graphql/parser/mutation.go (100%) rename {request => internal/request}/graphql/parser/parser.go (86%) rename {request => internal/request}/graphql/parser/query.go (100%) rename {request => internal/request}/graphql/parser/request.go (98%) rename {request => internal/request}/graphql/parser/subscription.go (100%) rename {request => internal/request}/graphql/schema/collection.go (99%) rename {request => internal/request}/graphql/schema/descriptions.go (98%) rename {request => internal/request}/graphql/schema/descriptions_test.go (100%) rename {request => internal/request}/graphql/schema/doc.go (100%) rename {request => internal/request}/graphql/schema/errors.go (100%) rename {request => internal/request}/graphql/schema/examples/example.schema.gql (100%) rename {request => internal/request}/graphql/schema/examples/root.schema.gql (100%) rename {request => internal/request}/graphql/schema/generate.go (99%) rename {request => internal/request}/graphql/schema/index_parse_test.go (100%) rename {request => internal/request}/graphql/schema/manager.go (98%) rename {request => internal/request}/graphql/schema/manager_test.go (100%) rename {request => internal/request}/graphql/schema/type.schema.gen.gql.template (100%) rename {request => internal/request}/graphql/schema/types/base.go (100%) rename {request => internal/request}/graphql/schema/types/commits.go (100%) rename {request => internal/request}/graphql/schema/types/descriptions.go (100%) rename {request => internal/request}/graphql/schema/types/scalars.go (100%) rename {request => internal/request}/graphql/schema/types/scalars_test.go (100%) rename {request => internal/request}/graphql/schema/types/types.go (100%) diff --git a/cli/server_dump.go b/cli/server_dump.go index 767b86f364..70ba5e557b 100644 --- a/cli/server_dump.go +++ b/cli/server_dump.go @@ -13,8 
+13,8 @@ package cli import ( "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/node" ) diff --git a/cli/start.go b/cli/start.go index ca9267e7e9..5c0e092c4a 100644 --- a/cli/start.go +++ b/cli/start.go @@ -20,9 +20,9 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/net" netutils "github.com/sourcenetwork/defradb/net/utils" "github.com/sourcenetwork/defradb/node" diff --git a/cli/utils.go b/cli/utils.go index 25af57528b..105323c33b 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -20,10 +20,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/http" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/db" ) type contextKey string diff --git a/client/document.go b/client/document.go index 4534e9fa33..ccbdfd688c 100644 --- a/client/document.go +++ b/client/document.go @@ -24,7 +24,7 @@ import ( "github.com/valyala/fastjson" "github.com/sourcenetwork/defradb/client/request" - ccid "github.com/sourcenetwork/defradb/core/cid" + ccid "github.com/sourcenetwork/defradb/internal/core/cid" ) // This is the main implementation starting point for accessing the internal Document API diff --git a/client/document_test.go b/client/document_test.go index a70e868e0e..b15c7b019a 100644 --- a/client/document_test.go +++ b/client/document_test.go @@ -18,7 +18,7 @@ import ( "github.com/sourcenetwork/immutable" - ccid "github.com/sourcenetwork/defradb/core/cid" + ccid 
"github.com/sourcenetwork/defradb/internal/core/cid" ) var ( diff --git a/datastore/blockstore_test.go b/datastore/blockstore_test.go index 81e086c99f..29daffcc76 100644 --- a/datastore/blockstore_test.go +++ b/datastore/blockstore_test.go @@ -19,7 +19,7 @@ import ( ipld "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/require" - ccid "github.com/sourcenetwork/defradb/core/cid" + ccid "github.com/sourcenetwork/defradb/internal/core/cid" "github.com/sourcenetwork/defradb/datastore/memory" ) diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go index b89517b975..40e088b685 100644 --- a/http/handler_ccip_test.go +++ b/http/handler_ccip_test.go @@ -27,7 +27,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" ) func TestCCIPGet_WithValidData(t *testing.T) { diff --git a/http/http_client.go b/http/http_client.go index f8e63fe70a..972e96e94d 100644 --- a/http/http_client.go +++ b/http/http_client.go @@ -18,7 +18,7 @@ import ( "net/url" "strings" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" ) type httpClient struct { diff --git a/http/middleware.go b/http/middleware.go index 4655868373..39783a3396 100644 --- a/http/middleware.go +++ b/http/middleware.go @@ -21,10 +21,10 @@ import ( "github.com/go-chi/cors" "golang.org/x/exp/slices" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/db" ) const ( diff --git a/acp/README.md b/internal/acp/README.md similarity index 100% rename from acp/README.md rename to internal/acp/README.md diff --git a/acp/acp.go b/internal/acp/acp.go similarity index 100% rename from 
acp/acp.go rename to internal/acp/acp.go diff --git a/acp/acp_local.go b/internal/acp/acp_local.go similarity index 100% rename from acp/acp_local.go rename to internal/acp/acp_local.go diff --git a/acp/acp_local_test.go b/internal/acp/acp_local_test.go similarity index 100% rename from acp/acp_local_test.go rename to internal/acp/acp_local_test.go diff --git a/acp/doc.go b/internal/acp/doc.go similarity index 100% rename from acp/doc.go rename to internal/acp/doc.go diff --git a/acp/dpi.go b/internal/acp/dpi.go similarity index 100% rename from acp/dpi.go rename to internal/acp/dpi.go diff --git a/acp/errors.go b/internal/acp/errors.go similarity index 100% rename from acp/errors.go rename to internal/acp/errors.go diff --git a/acp/identity/identity.go b/internal/acp/identity/identity.go similarity index 100% rename from acp/identity/identity.go rename to internal/acp/identity/identity.go diff --git a/connor/LICENSE b/internal/connor/LICENSE similarity index 100% rename from connor/LICENSE rename to internal/connor/LICENSE diff --git a/connor/and.go b/internal/connor/and.go similarity index 100% rename from connor/and.go rename to internal/connor/and.go diff --git a/connor/connor.go b/internal/connor/connor.go similarity index 100% rename from connor/connor.go rename to internal/connor/connor.go diff --git a/connor/eq.go b/internal/connor/eq.go similarity index 90% rename from connor/eq.go rename to internal/connor/eq.go index eb60f43564..86888eef37 100644 --- a/connor/eq.go +++ b/internal/connor/eq.go @@ -6,9 +6,9 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/connor/numbers" - ctime "github.com/sourcenetwork/defradb/connor/time" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/connor/numbers" + ctime "github.com/sourcenetwork/defradb/internal/connor/time" + "github.com/sourcenetwork/defradb/internal/core" ) // eq is an operator which performs object equality diff --git a/connor/errors.go 
b/internal/connor/errors.go similarity index 100% rename from connor/errors.go rename to internal/connor/errors.go diff --git a/connor/ge.go b/internal/connor/ge.go similarity index 95% rename from connor/ge.go rename to internal/connor/ge.go index 2ca0f89da5..851c59c53f 100644 --- a/connor/ge.go +++ b/internal/connor/ge.go @@ -4,7 +4,7 @@ import ( "time" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/connor/numbers" + "github.com/sourcenetwork/defradb/internal/connor/numbers" ) // ge does value comparisons to determine whether one diff --git a/connor/gt.go b/internal/connor/gt.go similarity index 94% rename from connor/gt.go rename to internal/connor/gt.go index 036d98c7a5..d5689ebd52 100644 --- a/connor/gt.go +++ b/internal/connor/gt.go @@ -4,7 +4,7 @@ import ( "time" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/connor/numbers" + "github.com/sourcenetwork/defradb/internal/connor/numbers" ) // gt does value comparisons to determine whether one diff --git a/connor/ilike.go b/internal/connor/ilike.go similarity index 100% rename from connor/ilike.go rename to internal/connor/ilike.go diff --git a/connor/ilike_test.go b/internal/connor/ilike_test.go similarity index 100% rename from connor/ilike_test.go rename to internal/connor/ilike_test.go diff --git a/connor/in.go b/internal/connor/in.go similarity index 100% rename from connor/in.go rename to internal/connor/in.go diff --git a/connor/key.go b/internal/connor/key.go similarity index 100% rename from connor/key.go rename to internal/connor/key.go diff --git a/connor/le.go b/internal/connor/le.go similarity index 95% rename from connor/le.go rename to internal/connor/le.go index 0ebbfb5bbf..0c89c8ffbf 100644 --- a/connor/le.go +++ b/internal/connor/le.go @@ -4,7 +4,7 @@ import ( "time" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/connor/numbers" + "github.com/sourcenetwork/defradb/internal/connor/numbers" ) // le does 
value comparisons to determine whether one diff --git a/connor/like.go b/internal/connor/like.go similarity index 100% rename from connor/like.go rename to internal/connor/like.go diff --git a/connor/like_test.go b/internal/connor/like_test.go similarity index 100% rename from connor/like_test.go rename to internal/connor/like_test.go diff --git a/connor/lt.go b/internal/connor/lt.go similarity index 94% rename from connor/lt.go rename to internal/connor/lt.go index 303f6255d6..a7e5e7bb03 100644 --- a/connor/lt.go +++ b/internal/connor/lt.go @@ -4,7 +4,7 @@ import ( "time" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/connor/numbers" + "github.com/sourcenetwork/defradb/internal/connor/numbers" ) // lt does value comparisons to determine whether one diff --git a/connor/ne.go b/internal/connor/ne.go similarity index 100% rename from connor/ne.go rename to internal/connor/ne.go diff --git a/connor/nilike.go b/internal/connor/nilike.go similarity index 100% rename from connor/nilike.go rename to internal/connor/nilike.go diff --git a/connor/nilike_test.go b/internal/connor/nilike_test.go similarity index 100% rename from connor/nilike_test.go rename to internal/connor/nilike_test.go diff --git a/connor/nin.go b/internal/connor/nin.go similarity index 100% rename from connor/nin.go rename to internal/connor/nin.go diff --git a/connor/nlike.go b/internal/connor/nlike.go similarity index 100% rename from connor/nlike.go rename to internal/connor/nlike.go diff --git a/connor/nlike_test.go b/internal/connor/nlike_test.go similarity index 100% rename from connor/nlike_test.go rename to internal/connor/nlike_test.go diff --git a/connor/not.go b/internal/connor/not.go similarity index 100% rename from connor/not.go rename to internal/connor/not.go diff --git a/connor/not_test.go b/internal/connor/not_test.go similarity index 100% rename from connor/not_test.go rename to internal/connor/not_test.go diff --git a/connor/numbers/equality.go 
b/internal/connor/numbers/equality.go similarity index 100% rename from connor/numbers/equality.go rename to internal/connor/numbers/equality.go diff --git a/connor/numbers/upcast.go b/internal/connor/numbers/upcast.go similarity index 100% rename from connor/numbers/upcast.go rename to internal/connor/numbers/upcast.go diff --git a/connor/or.go b/internal/connor/or.go similarity index 100% rename from connor/or.go rename to internal/connor/or.go diff --git a/connor/time/equality.go b/internal/connor/time/equality.go similarity index 100% rename from connor/time/equality.go rename to internal/connor/time/equality.go diff --git a/core/cid/cid.go b/internal/core/cid/cid.go similarity index 100% rename from core/cid/cid.go rename to internal/core/cid/cid.go diff --git a/core/clock.go b/internal/core/clock.go similarity index 100% rename from core/clock.go rename to internal/core/clock.go diff --git a/core/crdt/README.md b/internal/core/crdt/README.md similarity index 100% rename from core/crdt/README.md rename to internal/core/crdt/README.md diff --git a/core/crdt/base.go b/internal/core/crdt/base.go similarity index 97% rename from core/crdt/base.go rename to internal/core/crdt/base.go index a0d8b5375f..9958b7b1ac 100644 --- a/core/crdt/base.go +++ b/internal/core/crdt/base.go @@ -16,9 +16,9 @@ import ( ds "github.com/ipfs/go-datastore" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" ) // baseCRDT is embedded as a base layer into all diff --git a/core/crdt/base_test.go b/internal/core/crdt/base_test.go similarity index 97% rename from core/crdt/base_test.go rename to internal/core/crdt/base_test.go index c3db4af3d6..661c5cb7ce 100644 --- a/core/crdt/base_test.go +++ b/internal/core/crdt/base_test.go @@ -16,8 +16,8 @@ import ( ds "github.com/ipfs/go-datastore" - "github.com/sourcenetwork/defradb/core" 
"github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" ) func newDS() datastore.DSReaderWriter { diff --git a/core/crdt/composite.go b/internal/core/crdt/composite.go similarity index 98% rename from core/crdt/composite.go rename to internal/core/crdt/composite.go index 6900387db3..be762b1596 100644 --- a/core/crdt/composite.go +++ b/internal/core/crdt/composite.go @@ -23,10 +23,10 @@ import ( "github.com/ugorji/go/codec" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" ) // CompositeDAGDelta represents a delta-state update made of sub-MerkleCRDTs. diff --git a/core/crdt/counter.go b/internal/core/crdt/counter.go similarity index 98% rename from core/crdt/counter.go rename to internal/core/crdt/counter.go index 01ca3cf0da..51292d064c 100644 --- a/core/crdt/counter.go +++ b/internal/core/crdt/counter.go @@ -25,10 +25,10 @@ import ( "golang.org/x/exp/constraints" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" ) var ( diff --git a/core/crdt/doc.go b/internal/core/crdt/doc.go similarity index 100% rename from core/crdt/doc.go rename to internal/core/crdt/doc.go diff --git a/core/crdt/errors.go b/internal/core/crdt/errors.go similarity index 100% rename from core/crdt/errors.go rename to internal/core/crdt/errors.go diff --git a/core/crdt/lwwreg.go b/internal/core/crdt/lwwreg.go similarity index 97% rename from core/crdt/lwwreg.go rename to internal/core/crdt/lwwreg.go index 
937552d868..066c8e1523 100644 --- a/core/crdt/lwwreg.go +++ b/internal/core/crdt/lwwreg.go @@ -20,10 +20,10 @@ import ( "github.com/ugorji/go/codec" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" ) // LWWRegDelta is a single delta operation for an LWWRegister diff --git a/core/crdt/lwwreg_test.go b/internal/core/crdt/lwwreg_test.go similarity index 99% rename from core/crdt/lwwreg_test.go rename to internal/core/crdt/lwwreg_test.go index 5e6e1b27a4..c51fda6a46 100644 --- a/core/crdt/lwwreg_test.go +++ b/internal/core/crdt/lwwreg_test.go @@ -22,8 +22,8 @@ import ( mh "github.com/multiformats/go-multihash" "github.com/ugorji/go/codec" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" ) func newMockStore() datastore.DSReaderWriter { diff --git a/core/data.go b/internal/core/data.go similarity index 100% rename from core/data.go rename to internal/core/data.go diff --git a/core/data_test.go b/internal/core/data_test.go similarity index 100% rename from core/data_test.go rename to internal/core/data_test.go diff --git a/core/delta.go b/internal/core/delta.go similarity index 100% rename from core/delta.go rename to internal/core/delta.go diff --git a/core/doc.go b/internal/core/doc.go similarity index 100% rename from core/doc.go rename to internal/core/doc.go diff --git a/core/encoding.go b/internal/core/encoding.go similarity index 99% rename from core/encoding.go rename to internal/core/encoding.go index eab401c7a2..b18733f055 100644 --- a/core/encoding.go +++ b/internal/core/encoding.go @@ -17,7 +17,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - 
"github.com/sourcenetwork/defradb/encoding" + "github.com/sourcenetwork/defradb/internal/encoding" ) // NormalizeFieldValue takes a field value and description and converts it to the diff --git a/core/errors.go b/internal/core/errors.go similarity index 100% rename from core/errors.go rename to internal/core/errors.go diff --git a/core/key.go b/internal/core/key.go similarity index 100% rename from core/key.go rename to internal/core/key.go diff --git a/core/key_test.go b/internal/core/key_test.go similarity index 99% rename from core/key_test.go rename to internal/core/key_test.go index 7791075a17..c5e34073a3 100644 --- a/core/key_test.go +++ b/internal/core/key_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/encoding" + "github.com/sourcenetwork/defradb/internal/encoding" ) func TestNewDataStoreKey_ReturnsEmptyStruct_GivenEmptyString(t *testing.T) { diff --git a/core/net/broadcaster.go b/internal/core/net/broadcaster.go similarity index 100% rename from core/net/broadcaster.go rename to internal/core/net/broadcaster.go diff --git a/core/net/protocol.go b/internal/core/net/protocol.go similarity index 100% rename from core/net/protocol.go rename to internal/core/net/protocol.go diff --git a/core/node.go b/internal/core/node.go similarity index 100% rename from core/node.go rename to internal/core/node.go diff --git a/core/parser.go b/internal/core/parser.go similarity index 100% rename from core/parser.go rename to internal/core/parser.go diff --git a/core/replicated.go b/internal/core/replicated.go similarity index 100% rename from core/replicated.go rename to internal/core/replicated.go diff --git a/core/type.go b/internal/core/type.go similarity index 100% rename from core/type.go rename to internal/core/type.go diff --git a/db/backup.go b/internal/db/backup.go similarity index 100% rename from db/backup.go rename to internal/db/backup.go diff --git 
a/db/backup_test.go b/internal/db/backup_test.go similarity index 99% rename from db/backup_test.go rename to internal/db/backup_test.go index 486080db81..ce22333526 100644 --- a/db/backup_test.go +++ b/internal/db/backup_test.go @@ -18,8 +18,8 @@ import ( "github.com/stretchr/testify/require" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" ) func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) { diff --git a/db/base/collection_keys.go b/internal/db/base/collection_keys.go similarity index 97% rename from db/base/collection_keys.go rename to internal/db/base/collection_keys.go index 98584454ab..e23707285c 100644 --- a/db/base/collection_keys.go +++ b/internal/db/base/collection_keys.go @@ -14,7 +14,7 @@ import ( "fmt" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/core" ) // MakeDataStoreKeyWithCollectionDescription returns the datastore key for the given collection description. 
diff --git a/db/base/compare.go b/internal/db/base/compare.go similarity index 100% rename from db/base/compare.go rename to internal/db/base/compare.go diff --git a/db/base/descriptions.go b/internal/db/base/descriptions.go similarity index 100% rename from db/base/descriptions.go rename to internal/db/base/descriptions.go diff --git a/db/base/encoding.go b/internal/db/base/encoding.go similarity index 100% rename from db/base/encoding.go rename to internal/db/base/encoding.go diff --git a/db/base/errors.go b/internal/db/base/errors.go similarity index 100% rename from db/base/errors.go rename to internal/db/base/errors.go diff --git a/db/collection.go b/internal/db/collection.go similarity index 99% rename from db/collection.go rename to internal/db/collection.go index e84530d3e7..40f020a13c 100644 --- a/db/collection.go +++ b/internal/db/collection.go @@ -27,17 +27,17 @@ import ( "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/description" - "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/lens" - merklecrdt "github.com/sourcenetwork/defradb/merkle/crdt" + "github.com/sourcenetwork/defradb/internal/acp" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/db/description" + "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/lens" + merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" ) var _ client.Collection = (*collection)(nil) diff --git a/db/collection_acp.go b/internal/db/collection_acp.go 
similarity index 93% rename from db/collection_acp.go rename to internal/db/collection_acp.go index 4a273e907e..b61aeb8441 100644 --- a/db/collection_acp.go +++ b/internal/db/collection_acp.go @@ -13,8 +13,8 @@ package db import ( "context" - "github.com/sourcenetwork/defradb/acp" - "github.com/sourcenetwork/defradb/db/permission" + "github.com/sourcenetwork/defradb/internal/acp" + "github.com/sourcenetwork/defradb/internal/db/permission" ) // registerDocWithACP handles the registration of the document with acp. diff --git a/db/collection_delete.go b/internal/db/collection_delete.go similarity index 96% rename from db/collection_delete.go rename to internal/db/collection_delete.go index 62ebd7f167..05a4144c0f 100644 --- a/db/collection_delete.go +++ b/internal/db/collection_delete.go @@ -13,11 +13,11 @@ package db import ( "context" - "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/merkle/clock" + "github.com/sourcenetwork/defradb/internal/acp" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/merkle/clock" ) // DeleteWithFilter deletes using a filter to target documents for delete. 
diff --git a/db/collection_get.go b/internal/db/collection_get.go similarity index 93% rename from db/collection_get.go rename to internal/db/collection_get.go index 75d3d2826b..87c8ccbca6 100644 --- a/db/collection_get.go +++ b/internal/db/collection_get.go @@ -14,9 +14,9 @@ import ( "context" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/fetcher" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/db/fetcher" ) func (c *collection) Get( diff --git a/db/collection_index.go b/internal/db/collection_index.go similarity index 98% rename from db/collection_index.go rename to internal/db/collection_index.go index 2327ae027a..a5db0a96d3 100644 --- a/db/collection_index.go +++ b/internal/db/collection_index.go @@ -21,12 +21,12 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/description" - "github.com/sourcenetwork/defradb/db/fetcher" - "github.com/sourcenetwork/defradb/request/graphql/schema" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/db/description" + "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/request/graphql/schema" ) // createCollectionIndex creates a new collection index and saves it to the database in its system store. 
diff --git a/db/collection_test.go b/internal/db/collection_test.go similarity index 100% rename from db/collection_test.go rename to internal/db/collection_test.go diff --git a/db/collection_update.go b/internal/db/collection_update.go similarity index 99% rename from db/collection_update.go rename to internal/db/collection_update.go index e59469715a..7dbc9c72d9 100644 --- a/db/collection_update.go +++ b/internal/db/collection_update.go @@ -21,7 +21,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/planner" + "github.com/sourcenetwork/defradb/internal/planner" ) // UpdateWithFilter updates using a filter to target documents for update. diff --git a/db/config.go b/internal/db/config.go similarity index 97% rename from db/config.go rename to internal/db/config.go index 397956ed8b..56141068c4 100644 --- a/db/config.go +++ b/internal/db/config.go @@ -16,8 +16,8 @@ import ( "github.com/lens-vm/lens/host-go/engine/module" "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/internal/acp" ) const ( diff --git a/db/config_test.go b/internal/db/config_test.go similarity index 100% rename from db/config_test.go rename to internal/db/config_test.go diff --git a/db/container/container.go b/internal/db/container/container.go similarity index 97% rename from db/container/container.go rename to internal/db/container/container.go index 9146b4ba17..791bc7c906 100644 --- a/db/container/container.go +++ b/internal/db/container/container.go @@ -11,7 +11,7 @@ package container import ( - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/core" ) // DocumentContainer is a specialized buffer to store potentially diff --git a/db/context.go b/internal/db/context.go similarity index 98% rename from db/context.go rename to 
internal/db/context.go index 88019af323..96b28e0799 100644 --- a/db/context.go +++ b/internal/db/context.go @@ -15,8 +15,8 @@ import ( "github.com/sourcenetwork/immutable" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/datastore" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" ) // txnContextKey is the key type for transaction context values. diff --git a/db/context_test.go b/internal/db/context_test.go similarity index 100% rename from db/context_test.go rename to internal/db/context_test.go diff --git a/db/db.go b/internal/db/db.go similarity index 97% rename from db/db.go rename to internal/db/db.go index 613eea0b23..4721e40e48 100644 --- a/db/db.go +++ b/internal/db/db.go @@ -26,14 +26,14 @@ import ( "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/lens" - "github.com/sourcenetwork/defradb/request/graphql" + "github.com/sourcenetwork/defradb/internal/acp" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/lens" + "github.com/sourcenetwork/defradb/internal/request/graphql" ) var ( diff --git a/db/db_test.go b/internal/db/db_test.go similarity index 100% rename from db/db_test.go rename to internal/db/db_test.go diff --git a/db/description/collection.go b/internal/db/description/collection.go similarity index 99% rename from db/description/collection.go rename to internal/db/description/collection.go index 3658d3d318..90ef594a39 100644 --- a/db/description/collection.go +++ b/internal/db/description/collection.go @@ -20,8 +20,8 @@ import ( "github.com/ipfs/go-datastore/query" "github.com/sourcenetwork/defradb/client" - 
"github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" ) // SaveCollection saves the given collection to the system store overwriting any diff --git a/db/description/errors.go b/internal/db/description/errors.go similarity index 100% rename from db/description/errors.go rename to internal/db/description/errors.go diff --git a/db/description/schema.go b/internal/db/description/schema.go similarity index 98% rename from db/description/schema.go rename to internal/db/description/schema.go index c46b1f7564..6f5a782ec7 100644 --- a/db/description/schema.go +++ b/internal/db/description/schema.go @@ -17,9 +17,9 @@ import ( "github.com/ipfs/go-datastore/query" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/core/cid" ) // CreateSchemaVersion creates and saves to the store a new schema version. diff --git a/db/errors.go b/internal/db/errors.go similarity index 100% rename from db/errors.go rename to internal/db/errors.go diff --git a/db/fetcher/dag.go b/internal/db/fetcher/dag.go similarity index 97% rename from db/fetcher/dag.go rename to internal/db/fetcher/dag.go index 41039508b1..cec1121827 100644 --- a/db/fetcher/dag.go +++ b/internal/db/fetcher/dag.go @@ -19,8 +19,8 @@ import ( dsq "github.com/ipfs/go-datastore/query" "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" ) // HeadFetcher is a utility to incrementally fetch all the MerkleCRDT heads of a given doc/field. 
diff --git a/db/fetcher/encoded_doc.go b/internal/db/fetcher/encoded_doc.go similarity index 99% rename from db/fetcher/encoded_doc.go rename to internal/db/fetcher/encoded_doc.go index cb4345abe1..9bb3c6261c 100644 --- a/db/fetcher/encoded_doc.go +++ b/internal/db/fetcher/encoded_doc.go @@ -15,7 +15,7 @@ import ( "github.com/fxamacker/cbor/v2" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/core" ) type EncodedDocument interface { diff --git a/db/fetcher/errors.go b/internal/db/fetcher/errors.go similarity index 100% rename from db/fetcher/errors.go rename to internal/db/fetcher/errors.go diff --git a/db/fetcher/fetcher.go b/internal/db/fetcher/fetcher.go similarity index 97% rename from db/fetcher/fetcher.go rename to internal/db/fetcher/fetcher.go index 894361dea4..ffb42f9c96 100644 --- a/db/fetcher/fetcher.go +++ b/internal/db/fetcher/fetcher.go @@ -20,16 +20,16 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/iterable" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/permission" - "github.com/sourcenetwork/defradb/planner/mapper" - "github.com/sourcenetwork/defradb/request/graphql/parser" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/db/permission" + "github.com/sourcenetwork/defradb/internal/planner/mapper" + "github.com/sourcenetwork/defradb/internal/request/graphql/parser" ) // ExecInfo contains statistics about the fetcher execution. 
diff --git a/db/fetcher/indexer.go b/internal/db/fetcher/indexer.go similarity index 94% rename from db/fetcher/indexer.go rename to internal/db/fetcher/indexer.go index 2e776fd55b..31104cc13e 100644 --- a/db/fetcher/indexer.go +++ b/internal/db/fetcher/indexer.go @@ -15,13 +15,13 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // IndexFetcher is a fetcher that fetches documents by index. 
diff --git a/db/fetcher/indexer_iterators.go b/internal/db/fetcher/indexer_iterators.go similarity index 99% rename from db/fetcher/indexer_iterators.go rename to internal/db/fetcher/indexer_iterators.go index d1ca5841c3..9f01379fa6 100644 --- a/db/fetcher/indexer_iterators.go +++ b/internal/db/fetcher/indexer_iterators.go @@ -19,10 +19,10 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" "github.com/ipfs/go-datastore/query" ) diff --git a/db/fetcher/mocks/encoded_document.go b/internal/db/fetcher/mocks/encoded_document.go similarity index 100% rename from db/fetcher/mocks/encoded_document.go rename to internal/db/fetcher/mocks/encoded_document.go diff --git a/db/fetcher/mocks/fetcher.go b/internal/db/fetcher/mocks/fetcher.go similarity index 96% rename from db/fetcher/mocks/fetcher.go rename to internal/db/fetcher/mocks/fetcher.go index 4f537aefea..5f0019befb 100644 --- a/db/fetcher/mocks/fetcher.go +++ b/internal/db/fetcher/mocks/fetcher.go @@ -3,22 +3,22 @@ package mocks import ( - acp "github.com/sourcenetwork/defradb/acp" + acp "github.com/sourcenetwork/defradb/internal/acp" client "github.com/sourcenetwork/defradb/client" context "context" - core "github.com/sourcenetwork/defradb/core" + core "github.com/sourcenetwork/defradb/internal/core" datastore "github.com/sourcenetwork/defradb/datastore" - fetcher "github.com/sourcenetwork/defradb/db/fetcher" + fetcher "github.com/sourcenetwork/defradb/internal/db/fetcher" - identity "github.com/sourcenetwork/defradb/acp/identity" + identity "github.com/sourcenetwork/defradb/internal/acp/identity" immutable 
"github.com/sourcenetwork/immutable" - mapper "github.com/sourcenetwork/defradb/planner/mapper" + mapper "github.com/sourcenetwork/defradb/internal/planner/mapper" mock "github.com/stretchr/testify/mock" ) diff --git a/db/fetcher/mocks/utils.go b/internal/db/fetcher/mocks/utils.go similarity index 100% rename from db/fetcher/mocks/utils.go rename to internal/db/fetcher/mocks/utils.go diff --git a/db/fetcher/versioned.go b/internal/db/fetcher/versioned.go similarity index 97% rename from db/fetcher/versioned.go rename to internal/db/fetcher/versioned.go index 096002521c..5c81cea62b 100644 --- a/db/fetcher/versioned.go +++ b/internal/db/fetcher/versioned.go @@ -21,16 +21,16 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/errors" - merklecrdt "github.com/sourcenetwork/defradb/merkle/crdt" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) var ( diff --git a/db/fetcher_test.go b/internal/db/fetcher_test.go similarity index 85% rename from db/fetcher_test.go rename to internal/db/fetcher_test.go index f7de9bf036..01b5ff065f 100644 --- a/db/fetcher_test.go +++ b/internal/db/fetcher_test.go @@ -16,8 +16,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/fetcher" + 
"github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/fetcher" ) func TestFetcherStartWithoutInit(t *testing.T) { diff --git a/db/index.go b/internal/db/index.go similarity index 98% rename from db/index.go rename to internal/db/index.go index 693a18a5bf..71569e64db 100644 --- a/db/index.go +++ b/internal/db/index.go @@ -15,10 +15,10 @@ import ( "time" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/request/graphql/schema/types" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/request/graphql/schema/types" ) // CollectionIndex is an interface for collection indexes diff --git a/db/index_test.go b/internal/db/index_test.go similarity index 99% rename from db/index_test.go rename to internal/db/index_test.go index 5409b6c20e..b7f4bbfb96 100644 --- a/db/index_test.go +++ b/internal/db/index_test.go @@ -26,11 +26,11 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/mocks" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/request/graphql/schema" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/request/graphql/schema" ) const ( diff --git a/db/indexed_docs_test.go b/internal/db/indexed_docs_test.go similarity index 99% rename from db/indexed_docs_test.go rename to internal/db/indexed_docs_test.go index c3c1c6de7b..fe51e7b354 100644 --- a/db/indexed_docs_test.go +++ b/internal/db/indexed_docs_test.go @@ -23,15 +23,15 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/sourcenetwork/defradb/acp" - acpIdentity 
"github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/mocks" - "github.com/sourcenetwork/defradb/db/fetcher" - fetcherMocks "github.com/sourcenetwork/defradb/db/fetcher/mocks" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/fetcher" + fetcherMocks "github.com/sourcenetwork/defradb/internal/db/fetcher/mocks" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type userDoc struct { diff --git a/db/lens.go b/internal/db/lens.go similarity index 97% rename from db/lens.go rename to internal/db/lens.go index f21d084f88..581ad41f58 100644 --- a/db/lens.go +++ b/internal/db/lens.go @@ -17,9 +17,9 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/description" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/description" ) func (db *db) setMigration(ctx context.Context, cfg client.LensConfig) error { diff --git a/db/permission/check.go b/internal/db/permission/check.go similarity index 95% rename from db/permission/check.go rename to internal/db/permission/check.go index 36dce10489..8a2057d030 100644 --- a/db/permission/check.go +++ b/internal/db/permission/check.go @@ -15,9 +15,9 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity 
"github.com/sourcenetwork/defradb/internal/acp/identity" ) // CheckAccessOfDocOnCollectionWithACP handles the check, which tells us if access to the target diff --git a/db/permission/permission.go b/internal/db/permission/permission.go similarity index 100% rename from db/permission/permission.go rename to internal/db/permission/permission.go diff --git a/db/permission/register.go b/internal/db/permission/register.go similarity index 92% rename from db/permission/register.go rename to internal/db/permission/register.go index a46e5eef34..06a48f398a 100644 --- a/db/permission/register.go +++ b/internal/db/permission/register.go @@ -15,9 +15,9 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" ) // RegisterDocOnCollectionWithACP handles the registration of the document with acp. diff --git a/db/request.go b/internal/db/request.go similarity index 96% rename from db/request.go rename to internal/db/request.go index 83a2fb09bb..ff60c0835f 100644 --- a/db/request.go +++ b/internal/db/request.go @@ -14,7 +14,7 @@ import ( "context" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/planner" + "github.com/sourcenetwork/defradb/internal/planner" ) // execRequest executes a request against the database. 
diff --git a/db/schema.go b/internal/db/schema.go similarity index 99% rename from db/schema.go rename to internal/db/schema.go index 756c02f1ff..eca05f2a1f 100644 --- a/db/schema.go +++ b/internal/db/schema.go @@ -23,7 +23,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db/description" + "github.com/sourcenetwork/defradb/internal/db/description" ) const ( diff --git a/db/sequence.go b/internal/db/sequence.go similarity index 96% rename from db/sequence.go rename to internal/db/sequence.go index f39bdcfb65..06c2989b99 100644 --- a/db/sequence.go +++ b/internal/db/sequence.go @@ -16,8 +16,8 @@ import ( ds "github.com/ipfs/go-datastore" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" ) type sequence struct { diff --git a/db/store.go b/internal/db/store.go similarity index 100% rename from db/store.go rename to internal/db/store.go diff --git a/db/subscriptions.go b/internal/db/subscriptions.go similarity index 97% rename from db/subscriptions.go rename to internal/db/subscriptions.go index 0d16074887..b52504467e 100644 --- a/db/subscriptions.go +++ b/internal/db/subscriptions.go @@ -16,7 +16,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/planner" + "github.com/sourcenetwork/defradb/internal/planner" ) func (db *db) checkForClientSubscriptions(r *request.Request) ( diff --git a/db/view.go b/internal/db/view.go similarity index 97% rename from db/view.go rename to internal/db/view.go index 7cf040cbc5..a663da7add 100644 --- a/db/view.go +++ b/internal/db/view.go @@ -20,7 +20,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/db/description" + 
"github.com/sourcenetwork/defradb/internal/db/description" ) func (db *db) addView( diff --git a/encoding/bytes.go b/internal/encoding/bytes.go similarity index 100% rename from encoding/bytes.go rename to internal/encoding/bytes.go diff --git a/encoding/bytes_test.go b/internal/encoding/bytes_test.go similarity index 100% rename from encoding/bytes_test.go rename to internal/encoding/bytes_test.go diff --git a/encoding/encoding.go b/internal/encoding/encoding.go similarity index 100% rename from encoding/encoding.go rename to internal/encoding/encoding.go diff --git a/encoding/errors.go b/internal/encoding/errors.go similarity index 100% rename from encoding/errors.go rename to internal/encoding/errors.go diff --git a/encoding/field_value.go b/internal/encoding/field_value.go similarity index 100% rename from encoding/field_value.go rename to internal/encoding/field_value.go diff --git a/encoding/field_value_test.go b/internal/encoding/field_value_test.go similarity index 100% rename from encoding/field_value_test.go rename to internal/encoding/field_value_test.go diff --git a/encoding/float.go b/internal/encoding/float.go similarity index 100% rename from encoding/float.go rename to internal/encoding/float.go diff --git a/encoding/float_test.go b/internal/encoding/float_test.go similarity index 100% rename from encoding/float_test.go rename to internal/encoding/float_test.go diff --git a/encoding/int.go b/internal/encoding/int.go similarity index 100% rename from encoding/int.go rename to internal/encoding/int.go diff --git a/encoding/int_test.go b/internal/encoding/int_test.go similarity index 100% rename from encoding/int_test.go rename to internal/encoding/int_test.go diff --git a/encoding/null.go b/internal/encoding/null.go similarity index 100% rename from encoding/null.go rename to internal/encoding/null.go diff --git a/encoding/null_test.go b/internal/encoding/null_test.go similarity index 100% rename from encoding/null_test.go rename to 
internal/encoding/null_test.go diff --git a/encoding/string.go b/internal/encoding/string.go similarity index 100% rename from encoding/string.go rename to internal/encoding/string.go diff --git a/encoding/string_test.go b/internal/encoding/string_test.go similarity index 100% rename from encoding/string_test.go rename to internal/encoding/string_test.go diff --git a/encoding/type.go b/internal/encoding/type.go similarity index 100% rename from encoding/type.go rename to internal/encoding/type.go diff --git a/encoding/type_test.go b/internal/encoding/type_test.go similarity index 100% rename from encoding/type_test.go rename to internal/encoding/type_test.go diff --git a/lens/fetcher.go b/internal/lens/fetcher.go similarity index 97% rename from lens/fetcher.go rename to internal/lens/fetcher.go index 5477b948b5..7d5801ad25 100644 --- a/lens/fetcher.go +++ b/internal/lens/fetcher.go @@ -18,14 +18,14 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/fetcher" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // todo: The code in here can be significantly simplified with: diff --git a/lens/history.go b/internal/lens/history.go similarity index 99% rename from lens/history.go rename to internal/lens/history.go index eb793bff8c..e4e04d657f 100644 --- a/lens/history.go +++ b/internal/lens/history.go @@ -17,7 +17,7 @@ import ( "github.com/sourcenetwork/defradb/client" 
"github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/description" + "github.com/sourcenetwork/defradb/internal/db/description" ) // collectionHistoryLink represents an item in a particular collection's schema history, it diff --git a/lens/lens.go b/internal/lens/lens.go similarity index 100% rename from lens/lens.go rename to internal/lens/lens.go diff --git a/lens/registry.go b/internal/lens/registry.go similarity index 99% rename from lens/registry.go rename to internal/lens/registry.go index ede3b99bb2..1d9c51ab46 100644 --- a/lens/registry.go +++ b/internal/lens/registry.go @@ -23,8 +23,8 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/description" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/db/description" ) // todo: This file, particularly the `lensPool` stuff, contains fairly sensitive code that is both diff --git a/lens/txn_registry.go b/internal/lens/txn_registry.go similarity index 100% rename from lens/txn_registry.go rename to internal/lens/txn_registry.go diff --git a/merkle/clock/clock.go b/internal/merkle/clock/clock.go similarity index 98% rename from merkle/clock/clock.go rename to internal/merkle/clock/clock.go index 3f1ae47cf6..5fa6621cd4 100644 --- a/merkle/clock/clock.go +++ b/internal/merkle/clock/clock.go @@ -21,8 +21,8 @@ import ( "github.com/sourcenetwork/corelog" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" ) var ( diff --git a/merkle/clock/clock_test.go b/internal/merkle/clock/clock_test.go similarity index 96% rename from merkle/clock/clock_test.go rename to internal/merkle/clock/clock_test.go index 763c27ed4f..e6a882c931 100644 --- a/merkle/clock/clock_test.go +++ b/internal/merkle/clock/clock_test.go @@ -18,10 +18,10 @@ import ( ds "github.com/ipfs/go-datastore" 
"github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - ccid "github.com/sourcenetwork/defradb/core/cid" - "github.com/sourcenetwork/defradb/core/crdt" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" + ccid "github.com/sourcenetwork/defradb/internal/core/cid" + "github.com/sourcenetwork/defradb/internal/core/crdt" ) func newDS() ds.Datastore { diff --git a/merkle/clock/errors.go b/internal/merkle/clock/errors.go similarity index 100% rename from merkle/clock/errors.go rename to internal/merkle/clock/errors.go diff --git a/merkle/clock/heads.go b/internal/merkle/clock/heads.go similarity index 98% rename from merkle/clock/heads.go rename to internal/merkle/clock/heads.go index 2bbb04d2d9..9b1fad43dd 100644 --- a/merkle/clock/heads.go +++ b/internal/merkle/clock/heads.go @@ -20,8 +20,8 @@ import ( "github.com/ipfs/go-datastore/query" "github.com/sourcenetwork/corelog" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" ) // heads manages the current Merkle-CRDT heads. 
diff --git a/merkle/clock/heads_test.go b/internal/merkle/clock/heads_test.go similarity index 96% rename from merkle/clock/heads_test.go rename to internal/merkle/clock/heads_test.go index 18db117ebb..94680569a8 100644 --- a/merkle/clock/heads_test.go +++ b/internal/merkle/clock/heads_test.go @@ -21,9 +21,9 @@ import ( "github.com/ipfs/go-cid" - "github.com/sourcenetwork/defradb/core" - ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" + ccid "github.com/sourcenetwork/defradb/internal/core/cid" ) func newRandomCID() cid.Cid { diff --git a/merkle/clock/ipld.go b/internal/merkle/clock/ipld.go similarity index 98% rename from merkle/clock/ipld.go rename to internal/merkle/clock/ipld.go index 484a145dce..edef74291c 100644 --- a/merkle/clock/ipld.go +++ b/internal/merkle/clock/ipld.go @@ -18,7 +18,7 @@ import ( ipld "github.com/ipfs/go-ipld-format" mh "github.com/multiformats/go-multihash" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/core" ) // Credit: This file is from github.com/ipfs/go-ds-crdt diff --git a/merkle/clock/ipld_test.go b/internal/merkle/clock/ipld_test.go similarity index 100% rename from merkle/clock/ipld_test.go rename to internal/merkle/clock/ipld_test.go diff --git a/merkle/crdt/composite.go b/internal/merkle/crdt/composite.go similarity index 92% rename from merkle/crdt/composite.go rename to internal/merkle/crdt/composite.go index f58813235a..6912ccb9a0 100644 --- a/merkle/crdt/composite.go +++ b/internal/merkle/crdt/composite.go @@ -16,9 +16,9 @@ import ( ipld "github.com/ipfs/go-ipld-format" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - corecrdt "github.com/sourcenetwork/defradb/core/crdt" - "github.com/sourcenetwork/defradb/merkle/clock" + "github.com/sourcenetwork/defradb/internal/core" + corecrdt "github.com/sourcenetwork/defradb/internal/core/crdt" + 
"github.com/sourcenetwork/defradb/internal/merkle/clock" ) // MerkleCompositeDAG is a MerkleCRDT implementation of the CompositeDAG using MerkleClocks. diff --git a/merkle/crdt/counter.go b/internal/merkle/crdt/counter.go similarity index 91% rename from merkle/crdt/counter.go rename to internal/merkle/crdt/counter.go index 6ca016cea6..d7b7e17302 100644 --- a/merkle/crdt/counter.go +++ b/internal/merkle/crdt/counter.go @@ -16,9 +16,9 @@ import ( ipld "github.com/ipfs/go-ipld-format" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/core/crdt" - "github.com/sourcenetwork/defradb/merkle/clock" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/merkle/clock" ) // MerkleCounter is a MerkleCRDT implementation of the Counter using MerkleClocks. diff --git a/merkle/crdt/errors.go b/internal/merkle/crdt/errors.go similarity index 100% rename from merkle/crdt/errors.go rename to internal/merkle/crdt/errors.go diff --git a/merkle/crdt/lwwreg.go b/internal/merkle/crdt/lwwreg.go similarity index 91% rename from merkle/crdt/lwwreg.go rename to internal/merkle/crdt/lwwreg.go index 901d458c53..04b2cf0f04 100644 --- a/merkle/crdt/lwwreg.go +++ b/internal/merkle/crdt/lwwreg.go @@ -16,9 +16,9 @@ import ( ipld "github.com/ipfs/go-ipld-format" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - corecrdt "github.com/sourcenetwork/defradb/core/crdt" - "github.com/sourcenetwork/defradb/merkle/clock" + "github.com/sourcenetwork/defradb/internal/core" + corecrdt "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/merkle/clock" ) // MerkleLWWRegister is a MerkleCRDT implementation of the LWWRegister using MerkleClocks. 
diff --git a/merkle/crdt/merklecrdt.go b/internal/merkle/crdt/merklecrdt.go similarity index 98% rename from merkle/crdt/merklecrdt.go rename to internal/merkle/crdt/merklecrdt.go index 5bd95c86cd..6d1ba22fdf 100644 --- a/merkle/crdt/merklecrdt.go +++ b/internal/merkle/crdt/merklecrdt.go @@ -19,8 +19,8 @@ import ( ipld "github.com/ipfs/go-ipld-format" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/internal/core" ) type Stores interface { diff --git a/merkle/crdt/merklecrdt_test.go b/internal/merkle/crdt/merklecrdt_test.go similarity index 89% rename from merkle/crdt/merklecrdt_test.go rename to internal/merkle/crdt/merklecrdt_test.go index 08bd26dc77..fafcfc5905 100644 --- a/merkle/crdt/merklecrdt_test.go +++ b/internal/merkle/crdt/merklecrdt_test.go @@ -17,10 +17,10 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" - "github.com/sourcenetwork/defradb/core" - crdt "github.com/sourcenetwork/defradb/core/crdt" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/merkle/clock" + "github.com/sourcenetwork/defradb/internal/core" + crdt "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/merkle/clock" ) func newDS() ds.Datastore { diff --git a/merkle/doc.go b/internal/merkle/doc.go similarity index 100% rename from merkle/doc.go rename to internal/merkle/doc.go diff --git a/metric/metric.go b/internal/metric/metric.go similarity index 100% rename from metric/metric.go rename to internal/metric/metric.go diff --git a/metric/metric_test.go b/internal/metric/metric_test.go similarity index 100% rename from metric/metric_test.go rename to internal/metric/metric_test.go diff --git a/planner/arbitrary_join.go b/internal/planner/arbitrary_join.go similarity index 98% rename from planner/arbitrary_join.go rename to internal/planner/arbitrary_join.go index 
985e80484e..978015298b 100644 --- a/planner/arbitrary_join.go +++ b/internal/planner/arbitrary_join.go @@ -14,8 +14,8 @@ import ( "fmt" "strings" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // A data-source that may yield child items, parent items, or both depending on configuration diff --git a/planner/average.go b/internal/planner/average.go similarity index 96% rename from planner/average.go rename to internal/planner/average.go index 9de120ed98..24ef567011 100644 --- a/planner/average.go +++ b/internal/planner/average.go @@ -13,8 +13,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type averageNode struct { diff --git a/planner/commit.go b/internal/planner/commit.go similarity index 98% rename from planner/commit.go rename to internal/planner/commit.go index 8508ab9980..9da7324f8c 100644 --- a/planner/commit.go +++ b/internal/planner/commit.go @@ -20,9 +20,9 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/fetcher" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type dagScanNode struct { diff --git a/planner/count.go b/internal/planner/count.go similarity index 98% rename from planner/count.go rename to internal/planner/count.go index a3eddf0fbc..d0c0642cae 100644 --- a/planner/count.go +++ b/internal/planner/count.go 
@@ -21,8 +21,8 @@ import ( "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type countNode struct { diff --git a/planner/create.go b/internal/planner/create.go similarity index 96% rename from planner/create.go rename to internal/planner/create.go index addf8e4d97..21a36fcc24 100644 --- a/planner/create.go +++ b/internal/planner/create.go @@ -13,9 +13,9 @@ package planner import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // createNode is used to construct and execute diff --git a/planner/datasource.go b/internal/planner/datasource.go similarity index 95% rename from planner/datasource.go rename to internal/planner/datasource.go index 526621d9d4..46a71e0ae8 100644 --- a/planner/datasource.go +++ b/internal/planner/datasource.go @@ -12,7 +12,7 @@ package planner import ( "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type planSource struct { diff --git a/planner/delete.go b/internal/planner/delete.go similarity index 96% rename from planner/delete.go rename to internal/planner/delete.go index 71313e9f5d..093bf527dc 100644 --- a/planner/delete.go +++ b/internal/planner/delete.go @@ -13,8 +13,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - 
"github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type deleteNode struct { diff --git a/planner/doc.go b/internal/planner/doc.go similarity index 100% rename from planner/doc.go rename to internal/planner/doc.go diff --git a/planner/errors.go b/internal/planner/errors.go similarity index 100% rename from planner/errors.go rename to internal/planner/errors.go diff --git a/planner/explain.go b/internal/planner/explain.go similarity index 100% rename from planner/explain.go rename to internal/planner/explain.go diff --git a/planner/filter/complex.go b/internal/planner/filter/complex.go similarity index 93% rename from planner/filter/complex.go rename to internal/planner/filter/complex.go index ce72ead07e..d67cf6f796 100644 --- a/planner/filter/complex.go +++ b/internal/planner/filter/complex.go @@ -11,8 +11,8 @@ package filter import ( "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // IsComplex returns true if the provided filter is complex. 
diff --git a/planner/filter/complex_test.go b/internal/planner/filter/complex_test.go similarity index 98% rename from planner/filter/complex_test.go rename to internal/planner/filter/complex_test.go index 208860501f..7d73ce1e4a 100644 --- a/planner/filter/complex_test.go +++ b/internal/planner/filter/complex_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) func TestIsComplex(t *testing.T) { diff --git a/planner/filter/copy.go b/internal/planner/filter/copy.go similarity index 95% rename from planner/filter/copy.go rename to internal/planner/filter/copy.go index fec591f5ab..f37b4cbf57 100644 --- a/planner/filter/copy.go +++ b/internal/planner/filter/copy.go @@ -10,7 +10,7 @@ package filter import ( - "github.com/sourcenetwork/defradb/connor" + "github.com/sourcenetwork/defradb/internal/connor" ) // Copy performs a deep copy of the provided filter. diff --git a/planner/filter/copy_field.go b/internal/planner/filter/copy_field.go similarity index 95% rename from planner/filter/copy_field.go rename to internal/planner/filter/copy_field.go index fff974da06..9a524ecabb 100644 --- a/planner/filter/copy_field.go +++ b/internal/planner/filter/copy_field.go @@ -10,8 +10,8 @@ package filter import ( - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // CopyField copies the given field from the provided filter. 
diff --git a/planner/filter/copy_field_test.go b/internal/planner/filter/copy_field_test.go similarity index 97% rename from planner/filter/copy_field_test.go rename to internal/planner/filter/copy_field_test.go index 611f1d1fd8..d539e437e3 100644 --- a/planner/filter/copy_field_test.go +++ b/internal/planner/filter/copy_field_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" "github.com/stretchr/testify/assert" ) diff --git a/planner/filter/copy_test.go b/internal/planner/filter/copy_test.go similarity index 97% rename from planner/filter/copy_test.go rename to internal/planner/filter/copy_test.go index a45d368964..d22ba18b3a 100644 --- a/planner/filter/copy_test.go +++ b/internal/planner/filter/copy_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) func TestCopyFilter(t *testing.T) { diff --git a/planner/filter/extract_properties.go b/internal/planner/filter/extract_properties.go similarity index 95% rename from planner/filter/extract_properties.go rename to internal/planner/filter/extract_properties.go index 4c3e6bb0be..82c2068a19 100644 --- a/planner/filter/extract_properties.go +++ b/internal/planner/filter/extract_properties.go @@ -11,8 +11,8 @@ package filter import ( "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // Property represents a single field and is being 
filtered on. diff --git a/planner/filter/extract_properties_test.go b/internal/planner/filter/extract_properties_test.go similarity index 98% rename from planner/filter/extract_properties_test.go rename to internal/planner/filter/extract_properties_test.go index c90dbe85b0..fd93f0ff54 100644 --- a/planner/filter/extract_properties_test.go +++ b/internal/planner/filter/extract_properties_test.go @@ -14,7 +14,7 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/planner/mapper" "github.com/stretchr/testify/assert" ) diff --git a/planner/filter/merge.go b/internal/planner/filter/merge.go similarity index 91% rename from planner/filter/merge.go rename to internal/planner/filter/merge.go index 3bc38f4ba3..d5644e807e 100644 --- a/planner/filter/merge.go +++ b/internal/planner/filter/merge.go @@ -11,8 +11,8 @@ package filter import ( "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // Merge merges two filters into one. 
diff --git a/planner/filter/merge_test.go b/internal/planner/filter/merge_test.go similarity index 94% rename from planner/filter/merge_test.go rename to internal/planner/filter/merge_test.go index 153c850e80..6ea663eba5 100644 --- a/planner/filter/merge_test.go +++ b/internal/planner/filter/merge_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) func TestMergeFilterConditions(t *testing.T) { diff --git a/planner/filter/normalize.go b/internal/planner/filter/normalize.go similarity index 98% rename from planner/filter/normalize.go rename to internal/planner/filter/normalize.go index 65317f2170..a1445622a0 100644 --- a/planner/filter/normalize.go +++ b/internal/planner/filter/normalize.go @@ -11,8 +11,8 @@ package filter import ( "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // normalize normalizes the provided filter conditions. 
diff --git a/planner/filter/normalize_test.go b/internal/planner/filter/normalize_test.go similarity index 99% rename from planner/filter/normalize_test.go rename to internal/planner/filter/normalize_test.go index 22e4f69ed0..ad52db3720 100644 --- a/planner/filter/normalize_test.go +++ b/internal/planner/filter/normalize_test.go @@ -13,7 +13,7 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) func TestNormalizeConditions(t *testing.T) { diff --git a/planner/filter/remove_field.go b/internal/planner/filter/remove_field.go similarity index 93% rename from planner/filter/remove_field.go rename to internal/planner/filter/remove_field.go index 5e9f2f532e..4128c76828 100644 --- a/planner/filter/remove_field.go +++ b/internal/planner/filter/remove_field.go @@ -10,7 +10,7 @@ package filter import ( - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // RemoveField removes the given field from the provided filter. 
diff --git a/planner/filter/remove_field_test.go b/internal/planner/filter/remove_field_test.go similarity index 97% rename from planner/filter/remove_field_test.go rename to internal/planner/filter/remove_field_test.go index 8a34999e60..6a3e6d06ed 100644 --- a/planner/filter/remove_field_test.go +++ b/internal/planner/filter/remove_field_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) func TestRemoveFieldFromFilter(t *testing.T) { diff --git a/planner/filter/split.go b/internal/planner/filter/split.go similarity index 96% rename from planner/filter/split.go rename to internal/planner/filter/split.go index e562c8165a..69aed9fc2e 100644 --- a/planner/filter/split.go +++ b/internal/planner/filter/split.go @@ -10,7 +10,7 @@ package filter import ( - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // SplitByFields splits the provided filter into 2 filters based on fields. 
diff --git a/planner/filter/split_test.go b/internal/planner/filter/split_test.go similarity index 98% rename from planner/filter/split_test.go rename to internal/planner/filter/split_test.go index 221bd31527..d7eb32db16 100644 --- a/planner/filter/split_test.go +++ b/internal/planner/filter/split_test.go @@ -13,7 +13,7 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/planner/mapper" "github.com/stretchr/testify/assert" ) diff --git a/planner/filter/unwrap_relation.go b/internal/planner/filter/unwrap_relation.go similarity index 94% rename from planner/filter/unwrap_relation.go rename to internal/planner/filter/unwrap_relation.go index aa1be2e25d..ddf3d77b19 100644 --- a/planner/filter/unwrap_relation.go +++ b/internal/planner/filter/unwrap_relation.go @@ -10,8 +10,8 @@ package filter import ( - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // UnwrapRelation runs through the filter and returns a new filter with only the diff --git a/planner/filter/unwrap_relation_test.go b/internal/planner/filter/unwrap_relation_test.go similarity index 96% rename from planner/filter/unwrap_relation_test.go rename to internal/planner/filter/unwrap_relation_test.go index a7446f9d30..af00341fb6 100644 --- a/planner/filter/unwrap_relation_test.go +++ b/internal/planner/filter/unwrap_relation_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/planner/mapper" "github.com/stretchr/testify/assert" ) diff --git a/planner/filter/util_test.go 
b/internal/planner/filter/util_test.go similarity index 96% rename from planner/filter/util_test.go rename to internal/planner/filter/util_test.go index 19b367172c..60006b1b1d 100644 --- a/planner/filter/util_test.go +++ b/internal/planner/filter/util_test.go @@ -14,9 +14,9 @@ import ( "reflect" "testing" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) func assertEqualFilterMap(expected, actual map[connor.FilterKey]any, prefix string) string { diff --git a/planner/group.go b/internal/planner/group.go similarity index 98% rename from planner/group.go rename to internal/planner/group.go index ae2a0c8bee..b412b9e86e 100644 --- a/planner/group.go +++ b/internal/planner/group.go @@ -13,8 +13,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // A node responsible for the grouping of documents by a given selection of fields. diff --git a/planner/lens.go b/internal/planner/lens.go similarity index 98% rename from planner/lens.go rename to internal/planner/lens.go index eba0edd587..5b18a60138 100644 --- a/planner/lens.go +++ b/internal/planner/lens.go @@ -15,7 +15,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/core" ) // viewNode applies a lens transform to data yielded from the source node. 
diff --git a/planner/limit.go b/internal/planner/limit.go similarity index 95% rename from planner/limit.go rename to internal/planner/limit.go index 979bc50c02..0da7a8b249 100644 --- a/planner/limit.go +++ b/internal/planner/limit.go @@ -12,8 +12,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // Limit the results, yielding only what the limit/offset permits diff --git a/planner/mapper/aggregate.go b/internal/planner/mapper/aggregate.go similarity index 97% rename from planner/mapper/aggregate.go rename to internal/planner/mapper/aggregate.go index ceed03448e..55b141b2f1 100644 --- a/planner/mapper/aggregate.go +++ b/internal/planner/mapper/aggregate.go @@ -10,7 +10,7 @@ package mapper -import "github.com/sourcenetwork/defradb/core" +import "github.com/sourcenetwork/defradb/internal/core" // An optional child target. 
type OptionalChildTarget struct { diff --git a/planner/mapper/commitSelect.go b/internal/planner/mapper/commitSelect.go similarity index 100% rename from planner/mapper/commitSelect.go rename to internal/planner/mapper/commitSelect.go diff --git a/planner/mapper/errors.go b/internal/planner/mapper/errors.go similarity index 100% rename from planner/mapper/errors.go rename to internal/planner/mapper/errors.go diff --git a/planner/mapper/field.go b/internal/planner/mapper/field.go similarity index 100% rename from planner/mapper/field.go rename to internal/planner/mapper/field.go diff --git a/planner/mapper/mapper.go b/internal/planner/mapper/mapper.go similarity index 99% rename from planner/mapper/mapper.go rename to internal/planner/mapper/mapper.go index af3542c403..07ec0db8e6 100644 --- a/planner/mapper/mapper.go +++ b/internal/planner/mapper/mapper.go @@ -19,8 +19,8 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/core" ) const ( diff --git a/planner/mapper/mutation.go b/internal/planner/mapper/mutation.go similarity index 100% rename from planner/mapper/mutation.go rename to internal/planner/mapper/mutation.go diff --git a/planner/mapper/requestable.go b/internal/planner/mapper/requestable.go similarity index 100% rename from planner/mapper/requestable.go rename to internal/planner/mapper/requestable.go diff --git a/planner/mapper/select.go b/internal/planner/mapper/select.go similarity index 97% rename from planner/mapper/select.go rename to internal/planner/mapper/select.go index 1c4b509caa..8b67b60937 100644 --- a/planner/mapper/select.go +++ b/internal/planner/mapper/select.go @@ -13,7 +13,7 @@ package mapper import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/core" + 
"github.com/sourcenetwork/defradb/internal/core" ) // Select represents a request to return data from Defra. diff --git a/planner/mapper/targetable.go b/internal/planner/mapper/targetable.go similarity index 98% rename from planner/mapper/targetable.go rename to internal/planner/mapper/targetable.go index ae9d81e29a..68f7f993ef 100644 --- a/planner/mapper/targetable.go +++ b/internal/planner/mapper/targetable.go @@ -14,8 +14,8 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/core" ) var ( diff --git a/planner/multi.go b/internal/planner/multi.go similarity index 99% rename from planner/multi.go rename to internal/planner/multi.go index 1b5fc14bbc..27d6886d7c 100644 --- a/planner/multi.go +++ b/internal/planner/multi.go @@ -12,7 +12,7 @@ package planner import ( "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/core" ) /* diff --git a/planner/operations.go b/internal/planner/operations.go similarity index 100% rename from planner/operations.go rename to internal/planner/operations.go diff --git a/planner/order.go b/internal/planner/order.go similarity index 98% rename from planner/order.go rename to internal/planner/order.go index 5f61a952c9..7f30800f2e 100644 --- a/planner/order.go +++ b/internal/planner/order.go @@ -13,8 +13,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // simplified planNode interface. 
diff --git a/planner/pipe.go b/internal/planner/pipe.go similarity index 95% rename from planner/pipe.go rename to internal/planner/pipe.go index fe6da4f0ec..a14432bc12 100644 --- a/planner/pipe.go +++ b/internal/planner/pipe.go @@ -11,8 +11,8 @@ package planner import ( - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/container" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/container" ) // A lazily loaded cache-node that allows retrieval of cached documents at arbitrary indexes. diff --git a/planner/plan.go b/internal/planner/plan.go similarity index 100% rename from planner/plan.go rename to internal/planner/plan.go diff --git a/planner/planner.go b/internal/planner/planner.go similarity index 97% rename from planner/planner.go rename to internal/planner/planner.go index 2a181ce621..384ebf8ecb 100644 --- a/planner/planner.go +++ b/internal/planner/planner.go @@ -15,15 +15,15 @@ import ( "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/planner/filter" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/filter" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // planNode is an interface all nodes in the plan tree need to implement. 
diff --git a/planner/scan.go b/internal/planner/scan.go similarity index 95% rename from planner/scan.go rename to internal/planner/scan.go index e52b3c2131..a9274fed98 100644 --- a/planner/scan.go +++ b/internal/planner/scan.go @@ -15,13 +15,13 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/fetcher" - "github.com/sourcenetwork/defradb/lens" - "github.com/sourcenetwork/defradb/planner/filter" - "github.com/sourcenetwork/defradb/planner/mapper" - "github.com/sourcenetwork/defradb/request/graphql/parser" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/lens" + "github.com/sourcenetwork/defradb/internal/planner/filter" + "github.com/sourcenetwork/defradb/internal/planner/mapper" + "github.com/sourcenetwork/defradb/internal/request/graphql/parser" ) // scanExecInfo contains information about the execution of a scan. 
diff --git a/planner/select.go b/internal/planner/select.go similarity index 98% rename from planner/select.go rename to internal/planner/select.go index ce7ff19030..c3e7f4dd2d 100644 --- a/planner/select.go +++ b/internal/planner/select.go @@ -16,10 +16,10 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/fetcher" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) /* diff --git a/planner/sum.go b/internal/planner/sum.go similarity index 98% rename from planner/sum.go rename to internal/planner/sum.go index fafd0cc4b5..ff0b714ebf 100644 --- a/planner/sum.go +++ b/internal/planner/sum.go @@ -16,8 +16,8 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type sumNode struct { diff --git a/planner/top.go b/internal/planner/top.go similarity index 97% rename from planner/top.go rename to internal/planner/top.go index 93e530b2fc..d5faa491c1 100644 --- a/planner/top.go +++ b/internal/planner/top.go @@ -12,8 +12,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) const topLevelNodeKind string = "topLevelNode" diff --git a/planner/type_join.go b/internal/planner/type_join.go similarity 
index 98% rename from planner/type_join.go rename to internal/planner/type_join.go index f93a8fe7db..07dfb4655b 100644 --- a/planner/type_join.go +++ b/internal/planner/type_join.go @@ -15,11 +15,11 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/connor" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/planner/filter" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/connor" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/planner/filter" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) /* diff --git a/planner/type_join.md b/internal/planner/type_join.md similarity index 100% rename from planner/type_join.md rename to internal/planner/type_join.md diff --git a/planner/update.go b/internal/planner/update.go similarity index 97% rename from planner/update.go rename to internal/planner/update.go index b93934c136..2baf3f971f 100644 --- a/planner/update.go +++ b/internal/planner/update.go @@ -13,8 +13,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) type updateNode struct { diff --git a/planner/values.go b/internal/planner/values.go similarity index 94% rename from planner/values.go rename to internal/planner/values.go index 289b120a58..8053d2df1e 100644 --- a/planner/values.go +++ b/internal/planner/values.go @@ -13,10 +13,10 @@ package planner import ( "sort" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db/base" - 
"github.com/sourcenetwork/defradb/db/container" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/db/container" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // valuesNode contains a collection diff --git a/planner/view.go b/internal/planner/view.go similarity index 96% rename from planner/view.go rename to internal/planner/view.go index 2bb5f94fa8..d014bbe108 100644 --- a/planner/view.go +++ b/internal/planner/view.go @@ -12,8 +12,8 @@ package planner import ( "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner/mapper" ) // viewNode processes queries to a Defra View constructed from a base query ahead of time. diff --git a/request/doc.go b/internal/request/doc.go similarity index 100% rename from request/doc.go rename to internal/request/doc.go diff --git a/request/graphql/parser.go b/internal/request/graphql/parser.go similarity index 94% rename from request/graphql/parser.go rename to internal/request/graphql/parser.go index 310673c347..ab995e660c 100644 --- a/request/graphql/parser.go +++ b/internal/request/graphql/parser.go @@ -21,10 +21,10 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - defrap "github.com/sourcenetwork/defradb/request/graphql/parser" - "github.com/sourcenetwork/defradb/request/graphql/schema" + "github.com/sourcenetwork/defradb/internal/core" + defrap "github.com/sourcenetwork/defradb/internal/request/graphql/parser" + "github.com/sourcenetwork/defradb/internal/request/graphql/schema" ) var _ core.Parser = (*parser)(nil) diff --git 
a/request/graphql/parser/commit.go b/internal/request/graphql/parser/commit.go similarity index 98% rename from request/graphql/parser/commit.go rename to internal/request/graphql/parser/commit.go index a6b468fc35..8dc4db5aa3 100644 --- a/request/graphql/parser/commit.go +++ b/internal/request/graphql/parser/commit.go @@ -18,7 +18,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/internal/core" ) func parseCommitSelect(schema gql.Schema, parent *gql.Object, field *ast.Field) (*request.CommitSelect, error) { diff --git a/request/graphql/parser/doc.go b/internal/request/graphql/parser/doc.go similarity index 100% rename from request/graphql/parser/doc.go rename to internal/request/graphql/parser/doc.go diff --git a/request/graphql/parser/errors.go b/internal/request/graphql/parser/errors.go similarity index 100% rename from request/graphql/parser/errors.go rename to internal/request/graphql/parser/errors.go diff --git a/request/graphql/parser/filter.go b/internal/request/graphql/parser/filter.go similarity index 100% rename from request/graphql/parser/filter.go rename to internal/request/graphql/parser/filter.go diff --git a/request/graphql/parser/introspection.go b/internal/request/graphql/parser/introspection.go similarity index 100% rename from request/graphql/parser/introspection.go rename to internal/request/graphql/parser/introspection.go diff --git a/request/graphql/parser/mutation.go b/internal/request/graphql/parser/mutation.go similarity index 100% rename from request/graphql/parser/mutation.go rename to internal/request/graphql/parser/mutation.go diff --git a/request/graphql/parser/parser.go b/internal/request/graphql/parser/parser.go similarity index 86% rename from request/graphql/parser/parser.go rename to internal/request/graphql/parser/parser.go index 7c1ff5ac05..9412e96d69 100644 --- a/request/graphql/parser/parser.go +++ 
b/internal/request/graphql/parser/parser.go @@ -10,7 +10,7 @@ package parser -import "github.com/sourcenetwork/defradb/request/graphql/schema" +import "github.com/sourcenetwork/defradb/internal/request/graphql/schema" type Parser struct { SchemaManager *schema.SchemaManager diff --git a/request/graphql/parser/query.go b/internal/request/graphql/parser/query.go similarity index 100% rename from request/graphql/parser/query.go rename to internal/request/graphql/parser/query.go diff --git a/request/graphql/parser/request.go b/internal/request/graphql/parser/request.go similarity index 98% rename from request/graphql/parser/request.go rename to internal/request/graphql/parser/request.go index f0a73a4667..eda5e53169 100644 --- a/request/graphql/parser/request.go +++ b/internal/request/graphql/parser/request.go @@ -18,7 +18,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/errors" - schemaTypes "github.com/sourcenetwork/defradb/request/graphql/schema/types" + schemaTypes "github.com/sourcenetwork/defradb/internal/request/graphql/schema/types" ) // ParseRequest parses a root ast.Document, and returns a formatted Request object. 
diff --git a/request/graphql/parser/subscription.go b/internal/request/graphql/parser/subscription.go similarity index 100% rename from request/graphql/parser/subscription.go rename to internal/request/graphql/parser/subscription.go diff --git a/request/graphql/schema/collection.go b/internal/request/graphql/schema/collection.go similarity index 99% rename from request/graphql/schema/collection.go rename to internal/request/graphql/schema/collection.go index 937a6e2973..0416504ef4 100644 --- a/request/graphql/schema/collection.go +++ b/internal/request/graphql/schema/collection.go @@ -23,7 +23,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/request/graphql/schema/types" + "github.com/sourcenetwork/defradb/internal/request/graphql/schema/types" ) // FromString parses a GQL SDL string into a set of collection descriptions. diff --git a/request/graphql/schema/descriptions.go b/internal/request/graphql/schema/descriptions.go similarity index 98% rename from request/graphql/schema/descriptions.go rename to internal/request/graphql/schema/descriptions.go index dc97705b5d..945f4e8bdf 100644 --- a/request/graphql/schema/descriptions.go +++ b/internal/request/graphql/schema/descriptions.go @@ -14,7 +14,7 @@ import ( gql "github.com/sourcenetwork/graphql-go" "github.com/sourcenetwork/defradb/client" - schemaTypes "github.com/sourcenetwork/defradb/request/graphql/schema/types" + schemaTypes "github.com/sourcenetwork/defradb/internal/request/graphql/schema/types" ) var ( diff --git a/request/graphql/schema/descriptions_test.go b/internal/request/graphql/schema/descriptions_test.go similarity index 100% rename from request/graphql/schema/descriptions_test.go rename to internal/request/graphql/schema/descriptions_test.go diff --git a/request/graphql/schema/doc.go b/internal/request/graphql/schema/doc.go similarity index 100% rename from request/graphql/schema/doc.go rename to 
internal/request/graphql/schema/doc.go diff --git a/request/graphql/schema/errors.go b/internal/request/graphql/schema/errors.go similarity index 100% rename from request/graphql/schema/errors.go rename to internal/request/graphql/schema/errors.go diff --git a/request/graphql/schema/examples/example.schema.gql b/internal/request/graphql/schema/examples/example.schema.gql similarity index 100% rename from request/graphql/schema/examples/example.schema.gql rename to internal/request/graphql/schema/examples/example.schema.gql diff --git a/request/graphql/schema/examples/root.schema.gql b/internal/request/graphql/schema/examples/root.schema.gql similarity index 100% rename from request/graphql/schema/examples/root.schema.gql rename to internal/request/graphql/schema/examples/root.schema.gql diff --git a/request/graphql/schema/generate.go b/internal/request/graphql/schema/generate.go similarity index 99% rename from request/graphql/schema/generate.go rename to internal/request/graphql/schema/generate.go index 6b7483be4f..1f489daf47 100644 --- a/request/graphql/schema/generate.go +++ b/internal/request/graphql/schema/generate.go @@ -20,7 +20,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - schemaTypes "github.com/sourcenetwork/defradb/request/graphql/schema/types" + schemaTypes "github.com/sourcenetwork/defradb/internal/request/graphql/schema/types" ) // Given a basic developer defined schema in GraphQL Schema Definition Language diff --git a/request/graphql/schema/index_parse_test.go b/internal/request/graphql/schema/index_parse_test.go similarity index 100% rename from request/graphql/schema/index_parse_test.go rename to internal/request/graphql/schema/index_parse_test.go diff --git a/request/graphql/schema/manager.go b/internal/request/graphql/schema/manager.go similarity index 98% rename from request/graphql/schema/manager.go rename to internal/request/graphql/schema/manager.go index f4a2cb3e5b..02dfaade86 
100644 --- a/request/graphql/schema/manager.go +++ b/internal/request/graphql/schema/manager.go @@ -13,7 +13,7 @@ package schema import ( gql "github.com/sourcenetwork/graphql-go" - schemaTypes "github.com/sourcenetwork/defradb/request/graphql/schema/types" + schemaTypes "github.com/sourcenetwork/defradb/internal/request/graphql/schema/types" ) // SchemaManager creates an instanced management point diff --git a/request/graphql/schema/manager_test.go b/internal/request/graphql/schema/manager_test.go similarity index 100% rename from request/graphql/schema/manager_test.go rename to internal/request/graphql/schema/manager_test.go diff --git a/request/graphql/schema/type.schema.gen.gql.template b/internal/request/graphql/schema/type.schema.gen.gql.template similarity index 100% rename from request/graphql/schema/type.schema.gen.gql.template rename to internal/request/graphql/schema/type.schema.gen.gql.template diff --git a/request/graphql/schema/types/base.go b/internal/request/graphql/schema/types/base.go similarity index 100% rename from request/graphql/schema/types/base.go rename to internal/request/graphql/schema/types/base.go diff --git a/request/graphql/schema/types/commits.go b/internal/request/graphql/schema/types/commits.go similarity index 100% rename from request/graphql/schema/types/commits.go rename to internal/request/graphql/schema/types/commits.go diff --git a/request/graphql/schema/types/descriptions.go b/internal/request/graphql/schema/types/descriptions.go similarity index 100% rename from request/graphql/schema/types/descriptions.go rename to internal/request/graphql/schema/types/descriptions.go diff --git a/request/graphql/schema/types/scalars.go b/internal/request/graphql/schema/types/scalars.go similarity index 100% rename from request/graphql/schema/types/scalars.go rename to internal/request/graphql/schema/types/scalars.go diff --git a/request/graphql/schema/types/scalars_test.go b/internal/request/graphql/schema/types/scalars_test.go 
similarity index 100% rename from request/graphql/schema/types/scalars_test.go rename to internal/request/graphql/schema/types/scalars_test.go diff --git a/request/graphql/schema/types/types.go b/internal/request/graphql/schema/types/types.go similarity index 100% rename from request/graphql/schema/types/types.go rename to internal/request/graphql/schema/types/types.go diff --git a/net/dag_test.go b/net/dag_test.go index 2072122b2d..09b2701fac 100644 --- a/net/dag_test.go +++ b/net/dag_test.go @@ -23,8 +23,8 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/merkle/clock" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/merkle/clock" netutils "github.com/sourcenetwork/defradb/net/utils" ) diff --git a/net/dialer.go b/net/dialer.go index 3cd12be7ff..2e8d8b361e 100644 --- a/net/dialer.go +++ b/net/dialer.go @@ -22,8 +22,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/connectivity" - corenet "github.com/sourcenetwork/defradb/core/net" "github.com/sourcenetwork/defradb/errors" + corenet "github.com/sourcenetwork/defradb/internal/core/net" pb "github.com/sourcenetwork/defradb/net/pb" ) diff --git a/net/node_test.go b/net/node_test.go index bf0bc653c5..c0012bdb31 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -23,7 +23,7 @@ import ( "github.com/sourcenetwork/defradb/client" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" netutils "github.com/sourcenetwork/defradb/net/utils" ) diff --git a/net/peer.go b/net/peer.go index 61711b3918..e958e5c84d 100644 --- a/net/peer.go +++ b/net/peer.go @@ -35,12 +35,12 @@ import ( "google.golang.org/grpc" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - corenet 
"github.com/sourcenetwork/defradb/core/net" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/merkle/clock" + "github.com/sourcenetwork/defradb/internal/core" + corenet "github.com/sourcenetwork/defradb/internal/core/net" + "github.com/sourcenetwork/defradb/internal/merkle/clock" pb "github.com/sourcenetwork/defradb/net/pb" ) diff --git a/net/peer_collection.go b/net/peer_collection.go index 8bf7ee337f..1676a7be43 100644 --- a/net/peer_collection.go +++ b/net/peer_collection.go @@ -17,8 +17,8 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db" ) const marker = byte(0xff) diff --git a/net/peer_replicator.go b/net/peer_replicator.go index ce5f7e23b6..19accb17c4 100644 --- a/net/peer_replicator.go +++ b/net/peer_replicator.go @@ -19,8 +19,8 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db" ) func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error { diff --git a/net/peer_test.go b/net/peer_test.go index 2ad5db9037..e64107d888 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -26,13 +26,13 @@ import ( rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" "github.com/stretchr/testify/require" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core/crdt" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" 
"github.com/sourcenetwork/defradb/events" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/db" netutils "github.com/sourcenetwork/defradb/net/utils" ) diff --git a/net/process.go b/net/process.go index 6779ada29f..b02e2e1fed 100644 --- a/net/process.go +++ b/net/process.go @@ -25,11 +25,11 @@ import ( "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/errors" - merklecrdt "github.com/sourcenetwork/defradb/merkle/crdt" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/base" + merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" ) type blockProcessor struct { diff --git a/net/server.go b/net/server.go index ebf772a8bc..9e5c213200 100644 --- a/net/server.go +++ b/net/server.go @@ -30,10 +30,10 @@ import ( "google.golang.org/protobuf/proto" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db" pb "github.com/sourcenetwork/defradb/net/pb" ) diff --git a/node/node.go b/node/node.go index 9524247bf8..d0cfc87d0c 100644 --- a/node/node.go +++ b/node/node.go @@ -20,8 +20,8 @@ import ( "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/net" ) diff --git a/node/node_test.go b/node/node_test.go index 3f3c7c854f..6da528f999 100644 --- a/node/node_test.go +++ 
b/node/node_test.go @@ -19,8 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/net" ) diff --git a/tests/bench/query/planner/utils.go b/tests/bench/query/planner/utils.go index dec517d781..7655e55003 100644 --- a/tests/bench/query/planner/utils.go +++ b/tests/bench/query/planner/utils.go @@ -15,14 +15,14 @@ import ( "fmt" "testing" - "github.com/sourcenetwork/defradb/acp" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/planner" - "github.com/sourcenetwork/defradb/request/graphql" - gqlSchema "github.com/sourcenetwork/defradb/request/graphql/schema" + "github.com/sourcenetwork/defradb/internal/acp" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/planner" + "github.com/sourcenetwork/defradb/internal/request/graphql" + gqlSchema "github.com/sourcenetwork/defradb/internal/request/graphql/schema" benchutils "github.com/sourcenetwork/defradb/tests/bench" "github.com/sourcenetwork/defradb/tests/bench/fixtures" ) diff --git a/tests/clients/cli/wrapper_cli.go b/tests/clients/cli/wrapper_cli.go index cee64081d4..44ed7f22bc 100644 --- a/tests/clients/cli/wrapper_cli.go +++ b/tests/clients/cli/wrapper_cli.go @@ -17,7 +17,7 @@ import ( "strings" "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" ) type cliWrapper struct { diff --git a/tests/gen/cli/util_test.go b/tests/gen/cli/util_test.go index 58b2db083b..2114687ad5 100644 --- a/tests/gen/cli/util_test.go +++ b/tests/gen/cli/util_test.go @@ -21,9 +21,9 @@ 
import ( "github.com/sourcenetwork/defradb/client" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" httpapi "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/internal/db" ) var log = corelog.NewLogger("cli") diff --git a/tests/gen/schema_parser.go b/tests/gen/schema_parser.go index bcce388f22..fbd82bc47b 100644 --- a/tests/gen/schema_parser.go +++ b/tests/gen/schema_parser.go @@ -17,7 +17,7 @@ import ( "unicode" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/request/graphql" + "github.com/sourcenetwork/defradb/internal/request/graphql" ) func parseSDL(gqlSDL string) (map[string]client.CollectionDefinition, error) { diff --git a/tests/integration/acp.go b/tests/integration/acp.go index eb9bdc8fbe..ea69dd278c 100644 --- a/tests/integration/acp.go +++ b/tests/integration/acp.go @@ -14,8 +14,8 @@ import ( "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/require" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" - "github.com/sourcenetwork/defradb/db" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/db" ) // AddPolicy will attempt to add the given policy using DefraDB's ACP system. 
diff --git a/tests/integration/db.go b/tests/integration/db.go index 73d8818934..ff5fb0060c 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -22,7 +22,7 @@ import ( "github.com/sourcenetwork/defradb/client" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" ) diff --git a/tests/integration/events/simple/with_create_txn_test.go b/tests/integration/events/simple/with_create_txn_test.go index 7d7238b546..f77a6fe1f6 100644 --- a/tests/integration/events/simple/with_create_txn_test.go +++ b/tests/integration/events/simple/with_create_txn_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" testUtils "github.com/sourcenetwork/defradb/tests/integration/events" ) diff --git a/tests/integration/events/utils.go b/tests/integration/events/utils.go index 8b998d0051..6a8f54aac2 100644 --- a/tests/integration/events/utils.go +++ b/tests/integration/events/utils.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) diff --git a/tests/integration/index/create_test.go b/tests/integration/index/create_test.go index ce3f94080a..d37dfd07f7 100644 --- a/tests/integration/index/create_test.go +++ b/tests/integration/index/create_test.go @@ -13,7 +13,7 @@ package index import ( "testing" - "github.com/sourcenetwork/defradb/request/graphql/schema" + "github.com/sourcenetwork/defradb/internal/request/graphql/schema" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) diff --git 
a/tests/integration/index/create_unique_composite_test.go b/tests/integration/index/create_unique_composite_test.go index 2f0ed96488..f115668068 100644 --- a/tests/integration/index/create_unique_composite_test.go +++ b/tests/integration/index/create_unique_composite_test.go @@ -14,8 +14,8 @@ import ( "testing" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/db" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) diff --git a/tests/integration/index/create_unique_test.go b/tests/integration/index/create_unique_test.go index e9b2d41753..3d723f5d55 100644 --- a/tests/integration/index/create_unique_test.go +++ b/tests/integration/index/create_unique_test.go @@ -14,8 +14,8 @@ import ( "testing" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/db" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) diff --git a/tests/integration/lens.go b/tests/integration/lens.go index 541b708a33..c99cc3f5b7 100644 --- a/tests/integration/lens.go +++ b/tests/integration/lens.go @@ -14,7 +14,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/internal/db" ) // ConfigureMigration is a test action which will configure a Lens migration using the diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index 2373037b62..adac8c7535 100644 --- a/tests/integration/net/order/utils.go +++ b/tests/integration/net/order/utils.go @@ -20,8 +20,8 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - coreDB "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + coreDB "github.com/sourcenetwork/defradb/internal/db" 
"github.com/sourcenetwork/defradb/net" netutils "github.com/sourcenetwork/defradb/net/utils" testutils "github.com/sourcenetwork/defradb/tests/integration" diff --git a/tests/integration/schema/client_test.go b/tests/integration/schema/client_test.go index 6c603b9b71..ef2a155297 100644 --- a/tests/integration/schema/client_test.go +++ b/tests/integration/schema/client_test.go @@ -13,7 +13,7 @@ package schema import ( "testing" - schemaTypes "github.com/sourcenetwork/defradb/request/graphql/schema/types" + schemaTypes "github.com/sourcenetwork/defradb/internal/request/graphql/schema/types" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 4821d06b32..b12b8cef29 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -28,14 +28,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + "github.com/sourcenetwork/defradb/internal/db" + "github.com/sourcenetwork/defradb/internal/request/graphql" "github.com/sourcenetwork/defradb/net" - "github.com/sourcenetwork/defradb/request/graphql" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" "github.com/sourcenetwork/defradb/tests/clients" "github.com/sourcenetwork/defradb/tests/gen" diff --git a/tests/predefined/gen_predefined.go b/tests/predefined/gen_predefined.go index 34d575098e..647d878f82 100644 --- a/tests/predefined/gen_predefined.go +++ b/tests/predefined/gen_predefined.go @@ -16,7 +16,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - 
"github.com/sourcenetwork/defradb/request/graphql" + "github.com/sourcenetwork/defradb/internal/request/graphql" "github.com/sourcenetwork/defradb/tests/gen" ) diff --git a/tools/configs/mockery.yaml b/tools/configs/mockery.yaml index dccbb2d8f9..9216c32fa8 100644 --- a/tools/configs/mockery.yaml +++ b/tools/configs/mockery.yaml @@ -42,9 +42,9 @@ packages: # Packages and their interfaces to generate mocks for. DB: Collection: - github.com/sourcenetwork/defradb/db/fetcher: + github.com/sourcenetwork/defradb/internal/db/fetcher: config: - dir: "./db/fetcher/mocks" + dir: "./internal/db/fetcher/mocks" interfaces: Fetcher: EncodedDocument: diff --git a/version/version.go b/version/version.go index 32de6f67c8..eba7dccba0 100644 --- a/version/version.go +++ b/version/version.go @@ -18,8 +18,8 @@ import ( "strings" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core/net" "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/internal/core/net" ) const commitHashMaxLength = 8 From ed3550aa5a352ec778a02600494a17cd47e56ef7 Mon Sep 17 00:00:00 2001 From: Islam Aliev Date: Wed, 8 May 2024 23:52:20 +0200 Subject: [PATCH 04/78] fix: Remove limit for fetching secondary docs (#2594) ## Relevant issue(s) Resolves #2590 and #2600 ## Description Make join fetch all secondary docs of a fetched-by-index primary doc. 
--- internal/planner/type_join.go | 9 +- tests/integration/index/docs.go | 2 +- .../index/query_with_relation_filter_test.go | 134 +++++++++++++++++- tests/integration/test_case.go | 18 +++ tests/predefined/README.md | 3 + 5 files changed, 155 insertions(+), 11 deletions(-) diff --git a/internal/planner/type_join.go b/internal/planner/type_join.go index 07dfb4655b..f745e3c5cf 100644 --- a/internal/planner/type_join.go +++ b/internal/planner/type_join.go @@ -294,7 +294,7 @@ func (n *typeJoinOne) Kind() string { return "typeJoinOne" } -func fetchDocsWithFieldValue(plan planNode, fieldName string, val any, limit uint) ([]core.Doc, error) { +func fetchDocsWithFieldValue(plan planNode, fieldName string, val any) ([]core.Doc, error) { propIndex := plan.DocumentMap().FirstIndexOfName(fieldName) setSubTypeFilterToScanNode(plan, propIndex, val) @@ -302,7 +302,7 @@ func fetchDocsWithFieldValue(plan planNode, fieldName string, val any, limit uin return nil, NewErrSubTypeInit(err) } - docs := make([]core.Doc, 0, limit) + var docs []core.Doc for { next, err := plan.Next() if err != nil { @@ -313,10 +313,6 @@ func fetchDocsWithFieldValue(plan planNode, fieldName string, val any, limit uin } docs = append(docs, plan.Value()) - - if limit > 0 && len(docs) >= int(limit) { - break - } } return docs, nil @@ -587,7 +583,6 @@ func (join *invertibleTypeJoin) Next() (bool, error) { // otherwise the user would not have been able to request it. 
join.dir.secondaryField.Value(), firstDoc.GetID(), - join.secondaryFetchLimit, ) if err != nil { return false, err diff --git a/tests/integration/index/docs.go b/tests/integration/index/docs.go index 379ad5a8a1..cd53dd2f39 100644 --- a/tests/integration/index/docs.go +++ b/tests/integration/index/docs.go @@ -216,7 +216,7 @@ func getUserDocs() predefined.DocsList { }, { "model": "Playstation 5", - "year": 2022, + "year": 2021, "type": "game_console", "specs": map[string]any{ "CPU": 3.5, diff --git a/tests/integration/index/query_with_relation_filter_test.go b/tests/integration/index/query_with_relation_filter_test.go index 8fb6500eef..aa49dd2623 100644 --- a/tests/integration/index/query_with_relation_filter_test.go +++ b/tests/integration/index/query_with_relation_filter_test.go @@ -252,7 +252,11 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio }, }, testUtils.Request{ - Request: makeExplainQuery(req1), + Request: makeExplainQuery(req1), + // we make 1 index fetch to get the only address with city == "London" + // then we scan all 10 users to find one with matching "address_id" + // after this we fetch the name of the user + // it should be optimized after this is done https://github.com/sourcenetwork/defradb/issues/2601 Asserter: testUtils.NewExplainAsserter().WithFieldFetches(11).WithIndexFetches(1), }, testUtils.Request{ @@ -264,8 +268,12 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio }, }, testUtils.Request{ - Request: makeExplainQuery(req2), - Asserter: testUtils.NewExplainAsserter().WithFieldFetches(15).WithIndexFetches(3), + Request: makeExplainQuery(req2), + // we make 3 index fetch to get the 3 address with city == "Montreal" + // then we scan all 10 users to find one with matching "address_id" for each address + // after this we fetch the name of each user + // it should be optimized after this is done https://github.com/sourcenetwork/defradb/issues/2601 + Asserter: 
testUtils.NewExplainAsserter().WithFieldFetches(33).WithIndexFetches(3), }, }, } @@ -553,3 +561,123 @@ func TestQueryWithIndexOnOneToOne_IfFilterOnIndexedRelation_ShouldFilter(t *test testUtils.ExecuteTestCase(t, test) } + +func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedField_ShouldFilterWithExplain(t *testing.T) { + // This query will fetch first a matching device which is secondary doc and therefore + // has a reference to the primary User doc. + req := `query { + Device(filter: { + year: {_eq: 2021} + }) { + model + owner { + name + } + } + }` + test := testUtils.TestCase{ + Description: "With filter on indexed field of secondary relation (N-1) should fetch secondary and primary objects", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + devices: [Device] + } + + type Device { + model: String + year: Int @index + owner: User + } + `, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + { + "model": "Playstation 5", + "owner": map[string]any{ + "name": "Islam", + }, + }, + { + "model": "Playstation 5", + "owner": map[string]any{ + "name": "Addo", + }, + }, + { + "model": "iPhone 10", + "owner": map[string]any{ + "name": "Addo", + }, + }, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + // we make 3 index fetches to get all 3 devices with year 2021 + // and 9 field fetches: for every device we fetch additionally "model", "owner_id" and owner's "name" + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(9).WithIndexFetches(3), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedRelation_ShouldFilterWithExplain(t *testing.T) { + // This query will fetch first a matching user (owner) which is primary doc and therefore + // has no direct reference to secondary Device docs. 
+ // At the moment the db has to make a full scan of the Device docs to find the matching ones. + // Keenan has 3 devices. + req := `query { + Device(filter: { + owner: {name: {_eq: "Keenan"}} + }) { + model + } + }` + test := testUtils.TestCase{ + Description: "Upon querying secondary object with filter on indexed field of primary relation (in 1-N) should fetch all secondary objects of the same primary one", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String @index + devices: [Device] + } + + type Device { + model: String + owner: User + } + `, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"model": "iPhone 13"}, + {"model": "iPad Mini"}, + {"model": "MacBook Pro"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + // we make only 1 index fetch to get the owner by it's name + // and 44 field fetches to get 2 fields for all 22 devices in the db. + // it should be optimized after this is done https://github.com/sourcenetwork/defradb/issues/2601 + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(44).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index fa1629d0ef..b2f3368339 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -471,6 +471,24 @@ type GenerateDocs struct { } // CreatePredefinedDocs is an action that will trigger creation of predefined documents. +// Predefined docs allows specifying a database state with complex schemas that can be used by +// multiple tests while allowing each test to select a subset of the schemas (collection and +// collection's fields) to work with. 
+// Example: +// +// gen.DocsList{ +// ColName: "User", +// Docs: []map[string]any{ +// { +// "name": "Shahzad", +// "devices": []map[string]any{ +// { +// "model": "iPhone Xs", +// }}, +// }}, +// } +// +// For more information refer to tests/predefined/README.md type CreatePredefinedDocs struct { // NodeID may hold the ID (index) of a node to execute the generation on. // diff --git a/tests/predefined/README.md b/tests/predefined/README.md index d70cafab4f..28a4012dcd 100644 --- a/tests/predefined/README.md +++ b/tests/predefined/README.md @@ -36,6 +36,9 @@ gen.DocsList{ "year": 2022, "type": "phone", }}, + "address": map[string]any{ + "city": "Munich", + }, }}, } ``` From a3d035be722badf7bcbea7aaf14666a550b583ba Mon Sep 17 00:00:00 2001 From: Islam Aliev Date: Thu, 9 May 2024 08:37:03 +0200 Subject: [PATCH 05/78] feat: Enable sec. indexes with ACP (#2602) ## Relevant issue(s) Resolves #2365 ## Description Enable secondary indexes with ACP. --- internal/db/collection_index.go | 6 - internal/db/errors.go | 1 - tests/integration/acp/index/create_test.go | 92 +---- tests/integration/acp/index/fixture.go | 89 +++++ tests/integration/acp/index/query_test.go | 185 ++++++++++ .../acp/index/query_with_relation_test.go | 319 ++++++++++++++++++ 6 files changed, 603 insertions(+), 89 deletions(-) create mode 100644 tests/integration/acp/index/fixture.go create mode 100644 tests/integration/acp/index/query_test.go create mode 100644 tests/integration/acp/index/query_with_relation_test.go diff --git a/internal/db/collection_index.go b/internal/db/collection_index.go index a5db0a96d3..14f9a1b805 100644 --- a/internal/db/collection_index.go +++ b/internal/db/collection_index.go @@ -256,12 +256,6 @@ func (c *collection) createIndex( ctx context.Context, desc client.IndexDescription, ) (CollectionIndex, error) { - // Don't allow creating index on a permissioned collection, until following is implemented. 
- // TODO-ACP: ACP <> INDEX https://github.com/sourcenetwork/defradb/issues/2365 - if c.Description().Policy.HasValue() { - return nil, ErrCanNotCreateIndexOnCollectionWithPolicy - } - if desc.Name != "" && !schema.IsValidIndexName(desc.Name) { return nil, schema.NewErrIndexWithInvalidName("!") } diff --git a/internal/db/errors.go b/internal/db/errors.go index f917ee9724..fcb4baf13f 100644 --- a/internal/db/errors.go +++ b/internal/db/errors.go @@ -97,7 +97,6 @@ const ( var ( ErrFailedToGetCollection = errors.New(errFailedToGetCollection) - ErrCanNotCreateIndexOnCollectionWithPolicy = errors.New("can not create index on a collection with a policy") ErrSubscriptionsNotAllowed = errors.New("server does not accept subscriptions") ErrInvalidFilter = errors.New("invalid filter") ErrCollectionAlreadyExists = errors.New(errCollectionAlreadyExists) diff --git a/tests/integration/acp/index/create_test.go b/tests/integration/acp/index/create_test.go index f2c9b193a7..9c440e25e2 100644 --- a/tests/integration/acp/index/create_test.go +++ b/tests/integration/acp/index/create_test.go @@ -17,46 +17,14 @@ import ( acpUtils "github.com/sourcenetwork/defradb/tests/integration/acp" ) -// This test documents that we don't allow creating indexes on collections that have policy -// until the following is implemented: -// TODO-ACP: ACP <> P2P https://github.com/sourcenetwork/defradb/issues/2365 -func TestACP_IndexCreateWithSeparateRequest_OnCollectionWithPolicy_ReturnError(t *testing.T) { +func TestACP_IndexCreateWithSeparateRequest_OnCollectionWithPolicy_NoError(t *testing.T) { test := testUtils.TestCase{ - Description: "Test acp, with creating new index using separate request on permissioned collection, error", + Description: "Test acp, with creating new index using separate request on permissioned collection, no error", Actions: []any{ testUtils.AddPolicy{ - - Identity: acpUtils.Actor1Identity, - - Policy: ` - description: a test policy which marks a collection in a database as a 
resource - - actor: - name: actor - - resources: - users: - permissions: - read: - expr: owner + reader - write: - expr: owner - - relations: - owner: - types: - - actor - reader: - types: - - actor - admin: - manages: - - reader - types: - - actor - `, - + Identity: acpUtils.Actor1Identity, + Policy: userPolicy, ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", }, @@ -74,12 +42,8 @@ func TestACP_IndexCreateWithSeparateRequest_OnCollectionWithPolicy_ReturnError(t testUtils.CreateIndex{ CollectionID: 0, - - IndexName: "some_index", - - FieldName: "name", - - ExpectedError: "can not create index on a collection with a policy", + IndexName: "some_index", + FieldName: "name", }, testUtils.Request{ @@ -99,46 +63,14 @@ func TestACP_IndexCreateWithSeparateRequest_OnCollectionWithPolicy_ReturnError(t testUtils.ExecuteTestCase(t, test) } -// This test documents that we don't allow creating indexes on collections that have policy -// until the following is implemented: -// TODO-ACP: ACP <> P2P https://github.com/sourcenetwork/defradb/issues/2365 -func TestACP_IndexCreateWithDirective_OnCollectionWithPolicy_ReturnError(t *testing.T) { +func TestACP_IndexCreateWithDirective_OnCollectionWithPolicy_NoError(t *testing.T) { test := testUtils.TestCase{ - Description: "Test acp, with creating new index using directive on permissioned collection, error", + Description: "Test acp, with creating new index using directive on permissioned collection, no error", Actions: []any{ testUtils.AddPolicy{ - - Identity: acpUtils.Actor1Identity, - - Policy: ` - description: a test policy which marks a collection in a database as a resource - - actor: - name: actor - - resources: - users: - permissions: - read: - expr: owner + reader - write: - expr: owner - - relations: - owner: - types: - - actor - reader: - types: - - actor - admin: - manages: - - reader - types: - - actor - `, - + Identity: acpUtils.Actor1Identity, + Policy: userPolicy, ExpectedPolicyID: 
"53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", }, @@ -152,8 +84,6 @@ func TestACP_IndexCreateWithDirective_OnCollectionWithPolicy_ReturnError(t *test age: Int } `, - - ExpectedError: "can not create index on a collection with a policy", }, testUtils.Request{ @@ -164,8 +94,6 @@ func TestACP_IndexCreateWithDirective_OnCollectionWithPolicy_ReturnError(t *test age } }`, - - ExpectedError: `Cannot query field "Users" on type "Query"`, }, }, } diff --git a/tests/integration/acp/index/fixture.go b/tests/integration/acp/index/fixture.go new file mode 100644 index 0000000000..49c76e8b22 --- /dev/null +++ b/tests/integration/acp/index/fixture.go @@ -0,0 +1,89 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_acp_index + +// policy id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001" +const userPolicy = ` +description: a test policy which marks a collection in a database as a resource + +actor: + name: actor + +resources: + users: + permissions: + read: + expr: owner + reader + write: + expr: owner + + relations: + owner: + types: + - actor + reader: + types: + - actor + admin: + manages: + - reader + types: + - actor +` + +// policy id: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c" +const bookAuthorPolicy = ` +description: a test policy which marks a collection in a database as a resource + +actor: + name: actor + +resources: + author: + permissions: + read: + expr: owner + reader + write: + expr: owner + + relations: + owner: + types: + - actor + reader: + types: + - actor + admin: + manages: + - reader + types: + - actor + + book: + permissions: + read: + expr: owner + reader + write: + expr: owner + + relations: + owner: + types: + - actor + reader: + types: + - actor + admin: + manages: + - reader + types: + - actor +` diff --git a/tests/integration/acp/index/query_test.go b/tests/integration/acp/index/query_test.go new file mode 100644 index 0000000000..a7c09cd9e9 --- /dev/null +++ b/tests/integration/acp/index/query_test.go @@ -0,0 +1,185 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_acp_index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + acpUtils "github.com/sourcenetwork/defradb/tests/integration/acp" +) + +func TestACPWithIndex_UponQueryingPrivateDocWithoutIdentity_ShouldNotFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test acp, querying private doc without identity should not fetch", + Actions: []any{ + testUtils.AddPolicy{ + Identity: acpUtils.Actor1Identity, + Policy: userPolicy, + ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + }, + testUtils.SchemaUpdate{ + Schema: ` + type Users @policy( + id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + resource: "users" + ) { + name: String @index + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: ` + { + "name": "Shahzad" + } + `, + }, + testUtils.CreateDoc{ + Identity: acpUtils.Actor1Identity, + Doc: ` + { + "name": "Islam" + } + `, + }, + testUtils.Request{ + Request: ` + query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "Shahzad", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestACPWithIndex_UponQueryingPrivateDocWithIdentity_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test acp, querying private doc with identity should fetch", + Actions: []any{ + testUtils.AddPolicy{ + Identity: acpUtils.Actor1Identity, + Policy: userPolicy, + ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + }, + testUtils.SchemaUpdate{ + Schema: ` + type Users @policy( + id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + resource: "users" + ) { + name: String @index + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: ` + { + "name": "Shahzad" + } + `, + }, + testUtils.CreateDoc{ + Identity: acpUtils.Actor1Identity, + Doc: ` + { + "name": "Islam" + } + `, + }, + testUtils.Request{ + Identity: acpUtils.Actor1Identity, + Request: 
` + query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "Islam", + }, + { + "name": "Shahzad", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestACPWithIndex_UponQueryingPrivateDocWithWrongIdentity_ShouldNotFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test acp, querying private doc with wrong identity should not fetch", + Actions: []any{ + testUtils.AddPolicy{ + Identity: acpUtils.Actor1Identity, + Policy: userPolicy, + ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + }, + testUtils.SchemaUpdate{ + Schema: ` + type Users @policy( + id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + resource: "users" + ) { + name: String @index + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: ` + { + "name": "Shahzad" + } + `, + }, + testUtils.CreateDoc{ + Identity: acpUtils.Actor1Identity, + Doc: ` + { + "name": "Islam" + } + `, + }, + testUtils.Request{ + Identity: acpUtils.Actor2Identity, + Request: ` + query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "Shahzad", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/acp/index/query_with_relation_test.go b/tests/integration/acp/index/query_with_relation_test.go new file mode 100644 index 0000000000..614aaa6e84 --- /dev/null +++ b/tests/integration/acp/index/query_with_relation_test.go @@ -0,0 +1,319 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_acp_index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + acpUtils "github.com/sourcenetwork/defradb/tests/integration/acp" +) + +func createAuthorBooksSchemaWithPolicyAndCreateDocs() []any { + return []any{ + testUtils.AddPolicy{ + Identity: acpUtils.Actor1Identity, + Policy: bookAuthorPolicy, + ExpectedPolicyID: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c", + }, + testUtils.SchemaUpdate{ + Schema: ` + type Author @policy( + id: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c", + resource: "author" + ) { + name: String + age: Int @index + verified: Boolean + published: [Book] + } + + type Book @policy( + id: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c", + resource: "author" + ) { + name: String + rating: Float @index + author: Author + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + Doc: `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + }, + testUtils.CreateDoc{ + Identity: acpUtils.Actor1Identity, + CollectionID: 0, + // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + Doc: `{ + "name": "Cornelia Funke", + "age": 62, + "verified": false + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + }, + testUtils.CreateDoc{ + Identity: acpUtils.Actor1Identity, + CollectionID: 1, + Doc: `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + }, + testUtils.CreateDoc{ + Identity: acpUtils.Actor1Identity, + CollectionID: 1, + Doc: `{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + }`, + }, + } +} + +func TestACPWithIndex_UponQueryingPrivateOneToManyRelatedDocWithoutIdentity_ShouldNotFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test ACP with 
index: upon querying private (one-to-many) related doc without identity should not fetch", + Actions: []any{ + createAuthorBooksSchemaWithPolicyAndCreateDocs(), + testUtils.Request{ + Request: ` + query { + Author(filter: { + published: {rating: {_gt: 3}} + }) { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestACPWithIndex_UponQueryingPrivateOneToManyRelatedDocWithIdentity_ShouldFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test ACP with index: upon querying private (one-to-many) related doc with identity should fetch", + Actions: []any{ + createAuthorBooksSchemaWithPolicyAndCreateDocs(), + testUtils.Request{ + Identity: acpUtils.Actor1Identity, + Request: ` + query { + Author(filter: { + published: {rating: {_gt: 3}} + }) { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{ + { + "name": "Painted House", + }, + { + "name": "A Time for Mercy", + }, + }, + }, + { + "name": "Cornelia Funke", + "published": []map[string]any{ + { + "name": "Theif Lord", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestACPWithIndex_UponQueryingPrivateOneToManyRelatedDocWithWrongIdentity_ShouldNotFetch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test ACP with index: upon querying private (one-to-many) related doc with wrong identity should not fetch", + Actions: []any{ + createAuthorBooksSchemaWithPolicyAndCreateDocs(), + testUtils.Request{ + Identity: acpUtils.Actor2Identity, + Request: ` + query { + Author(filter: { + published: {rating: {_gt: 3}} + }) { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{ + { + "name": "Painted House", + }, + }, 
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestACPWithIndex_UponQueryingPrivateManyToOneRelatedDocWithoutIdentity_ShouldNotFetch(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test ACP with index: upon querying private (many-to-one) related doc without identity should not fetch",
+		Actions: []any{
+			createAuthorBooksSchemaWithPolicyAndCreateDocs(),
+			testUtils.Request{
+				Request: `
+					query {
+						Book(filter: {
+							author: {age: {_gt: 60}}
+						}) {
+							name
+							author {
+								name
+							}
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Painted House",
+						"author": map[string]any{
+							"name": "John Grisham",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestACPWithIndex_UponQueryingPrivateManyToOneRelatedDocWithIdentity_ShouldFetch(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test ACP with index: upon querying private (many-to-one) related doc with identity should fetch",
+		Actions: []any{
+			createAuthorBooksSchemaWithPolicyAndCreateDocs(),
+			testUtils.Request{
+				Identity: acpUtils.Actor1Identity,
+				Request: `
+					query {
+						Book(filter: {
+							author: {age: {_gt: 60}}
+						}) {
+							name
+							author {
+								name
+							}
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Theif Lord",
+						"author": map[string]any{
+							"name": "Cornelia Funke",
+						},
+					},
+					{
+						"name": "Painted House",
+						"author": map[string]any{
+							"name": "John Grisham",
+						},
+					},
+					{
+						"name": "A Time for Mercy",
+						"author": map[string]any{
+							"name": "John Grisham",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestACPWithIndex_UponQueryingPrivateManyToOneRelatedDocWithWrongIdentity_ShouldNotFetch(t *testing.T) {
+	test := testUtils.TestCase{
+		Description: "Test ACP with index: upon querying private (many-to-one) related doc with wrong identity should not fetch",
+		Actions: []any{
+			createAuthorBooksSchemaWithPolicyAndCreateDocs(),
+			testUtils.Request{
+				Identity: acpUtils.Actor2Identity,
+				Request: `
+					query {
+						
Book(filter: { + author: {age: {_gt: 60}} + }) { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} From 01e1b669c714e844a43c6021c1fc8fc8ef90a279 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Fri, 10 May 2024 09:24:28 -0700 Subject: [PATCH 06/78] feat: Keyring (#2557) ## Relevant issue(s) Resolves #2556 ## Description This PR adds a keyring to the cli. Notable changes: - created `keyring` package - file backed keystore uses a password based key generation to encrypt - system backed keystore uses the OS managed keyring - added `keyring` root command - `--no-keyring` disables the keyring and generates ephemeral keys - `--keyring-backend` flag sets the keyring backend (file or system) - `--keyring-path` flag sets the keyring directory when using file backend - `--keyring-namespace` flag sets the service name when using system backend - added `keyring generate` generates required keys - `--no-encryption-key` skips generating encryption key (disables datastore encryption) - added `keyring import` import external keys (hexadecimal for now) - added `keyring export` export existing key (hexadecimal for now) - moved key generation to `crypto` package ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? Manually tested on MacOS. 
Specify the platform(s) on which this was tested: - MacOS --- .github/workflows/start-binary.yml | 2 +- .../workflows/test-and-upload-coverage.yml | 11 +++ README.md | 28 ++++++ cli/cli.go | 8 ++ cli/config.go | 5 + cli/config_test.go | 6 +- cli/errors.go | 12 +++ cli/keyring.go | 25 +++++ cli/keyring_export.go | 41 ++++++++ cli/keyring_export_test.go | 51 ++++++++++ cli/keyring_generate.go | 63 +++++++++++++ cli/keyring_generate_test.go | 52 ++++++++++ cli/keyring_import.go | 42 +++++++++ cli/keyring_import_test.go | 42 +++++++++ cli/root.go | 24 +++++ cli/start.go | 31 +++--- cli/utils.go | 63 ++++++------- crypto/crypto.go | 29 ++++++ docs/cli/defradb.md | 5 + docs/cli/defradb_client.md | 4 + docs/cli/defradb_client_acp.md | 4 + docs/cli/defradb_client_acp_policy.md | 4 + docs/cli/defradb_client_acp_policy_add.md | 4 + docs/cli/defradb_client_backup.md | 4 + docs/cli/defradb_client_backup_export.md | 4 + docs/cli/defradb_client_backup_import.md | 4 + docs/cli/defradb_client_collection.md | 4 + docs/cli/defradb_client_collection_create.md | 4 + docs/cli/defradb_client_collection_delete.md | 4 + .../cli/defradb_client_collection_describe.md | 4 + docs/cli/defradb_client_collection_docIDs.md | 4 + docs/cli/defradb_client_collection_get.md | 4 + docs/cli/defradb_client_collection_patch.md | 4 + docs/cli/defradb_client_collection_update.md | 4 + docs/cli/defradb_client_dump.md | 4 + docs/cli/defradb_client_index.md | 4 + docs/cli/defradb_client_index_create.md | 4 + docs/cli/defradb_client_index_drop.md | 4 + docs/cli/defradb_client_index_list.md | 4 + docs/cli/defradb_client_p2p.md | 4 + docs/cli/defradb_client_p2p_collection.md | 4 + docs/cli/defradb_client_p2p_collection_add.md | 4 + .../defradb_client_p2p_collection_getall.md | 4 + .../defradb_client_p2p_collection_remove.md | 4 + docs/cli/defradb_client_p2p_info.md | 4 + docs/cli/defradb_client_p2p_replicator.md | 4 + .../defradb_client_p2p_replicator_delete.md | 4 + .../defradb_client_p2p_replicator_getall.md | 
4 + docs/cli/defradb_client_p2p_replicator_set.md | 4 + docs/cli/defradb_client_query.md | 4 + docs/cli/defradb_client_schema.md | 4 + docs/cli/defradb_client_schema_add.md | 4 + docs/cli/defradb_client_schema_describe.md | 4 + docs/cli/defradb_client_schema_migration.md | 4 + .../defradb_client_schema_migration_down.md | 4 + .../defradb_client_schema_migration_reload.md | 4 + ...db_client_schema_migration_set-registry.md | 4 + .../defradb_client_schema_migration_set.md | 4 + .../cli/defradb_client_schema_migration_up.md | 4 + docs/cli/defradb_client_schema_patch.md | 4 + docs/cli/defradb_client_schema_set-active.md | 4 + docs/cli/defradb_client_tx.md | 4 + docs/cli/defradb_client_tx_commit.md | 4 + docs/cli/defradb_client_tx_create.md | 4 + docs/cli/defradb_client_tx_discard.md | 4 + docs/cli/defradb_client_view.md | 4 + docs/cli/defradb_client_view_add.md | 4 + docs/cli/defradb_keyring.md | 49 ++++++++++ docs/cli/defradb_keyring_export.md | 53 +++++++++++ docs/cli/defradb_keyring_generate.md | 62 ++++++++++++ docs/cli/defradb_keyring_import.md | 53 +++++++++++ docs/cli/defradb_server-dump.md | 4 + docs/cli/defradb_start.md | 4 + docs/cli/defradb_version.md | 4 + docs/config.md | 19 ++++ go.mod | 14 ++- go.sum | 26 ++++- keyring/errors.go | 16 ++++ keyring/file.go | 94 +++++++++++++++++++ keyring/file_test.go | 51 ++++++++++ keyring/keyring.go | 27 ++++++ keyring/system.go | 55 +++++++++++ net/config.go | 5 +- net/node.go | 13 ++- node/store.go | 7 ++ tests/integration/db.go | 38 ++++++-- tests/integration/utils2.go | 5 +- 87 files changed, 1259 insertions(+), 72 deletions(-) create mode 100644 cli/keyring.go create mode 100644 cli/keyring_export.go create mode 100644 cli/keyring_export_test.go create mode 100644 cli/keyring_generate.go create mode 100644 cli/keyring_generate_test.go create mode 100644 cli/keyring_import.go create mode 100644 cli/keyring_import_test.go create mode 100644 crypto/crypto.go create mode 100644 docs/cli/defradb_keyring.md create mode 
100644 docs/cli/defradb_keyring_export.md create mode 100644 docs/cli/defradb_keyring_generate.md create mode 100644 docs/cli/defradb_keyring_import.md create mode 100644 keyring/errors.go create mode 100644 keyring/file.go create mode 100644 keyring/file_test.go create mode 100644 keyring/keyring.go create mode 100644 keyring/system.go diff --git a/.github/workflows/start-binary.yml b/.github/workflows/start-binary.yml index 35fea3c022..7268faae7b 100644 --- a/.github/workflows/start-binary.yml +++ b/.github/workflows/start-binary.yml @@ -48,7 +48,7 @@ jobs: - name: Attempt to start binary run: | - ./build/defradb start & + ./build/defradb start --no-keyring & sleep 5 - name: Check if binary is still running diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index 491b674906..0541b9a331 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -35,17 +35,26 @@ jobs: database-type: [badger-file, badger-memory] mutation-type: [gql, collection-named, collection-save] detect-changes: [false] + database-encryption: [false] include: - os: ubuntu-latest client-type: go database-type: badger-memory mutation-type: collection-save detect-changes: true + database-encryption: false + - os: ubuntu-latest + client-type: go + database-type: badger-memory + mutation-type: collection-save + detect-changes: false + database-encryption: true - os: macos-latest client-type: go database-type: badger-memory mutation-type: collection-save detect-changes: false + database-encryption: false ## TODO: https://github.com/sourcenetwork/defradb/issues/2080 ## Uncomment the lines below to Re-enable the windows build once this todo is resolved. 
## - os: windows-latest @@ -53,6 +62,7 @@ jobs: ## database-type: badger-memory ## mutation-type: collection-save ## detect-changes: false +## database-encryption: false runs-on: ${{ matrix.os }} @@ -68,6 +78,7 @@ jobs: DEFRA_CLIENT_CLI: ${{ matrix.client-type == 'cli' }} DEFRA_BADGER_MEMORY: ${{ matrix.database-type == 'badger-memory' }} DEFRA_BADGER_FILE: ${{ matrix.database-type == 'badger-file' }} + DEFRA_BADGER_ENCRYPTION: ${{ matrix.database-encryption }} DEFRA_MUTATION_TYPE: ${{ matrix.mutation-type }} steps: diff --git a/README.md b/README.md index 220c48f842..71e3ff856c 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ Read the documentation on [docs.source.network](https://docs.source.network/). ## Table of Contents - [Install](#install) +- [Key Management](#key-management) - [Start](#start) - [Configuration](#configuration) - [External port binding](#external-port-binding) @@ -58,6 +59,33 @@ export PATH=$PATH:$(go env GOPATH)/bin We recommend experimenting with queries using a native GraphQL client. GraphiQL is a popular option - [download and install it](https://altairgraphql.dev/#download). +## Key Management + +DefraDB has a built in keyring that can be used to store private keys securely. + +The following keys are loaded from the keyring on start: + +- `peer-key` Ed25519 private key (required) +- `encryption-key` AES-128, AES-192, or AES-256 key (optional) + +To randomly generate the required keys, run the following command: + +``` +defradb keyring generate +``` + +To import externally generated keys, run the following command: + +``` +defradb keyring import +``` + +To learn more about the available options: + +``` +defradb keyring --help +``` + ## Start Start a node by executing `defradb start`. Keep the node running while going through the following examples. 
diff --git a/cli/cli.go b/cli/cli.go index 38209a9f69..7099f57f99 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -122,9 +122,17 @@ func NewDefraCommand() *cobra.Command { collection, ) + keyring := MakeKeyringCommand() + keyring.AddCommand( + MakeKeyringGenerateCommand(), + MakeKeyringImportCommand(), + MakeKeyringExportCommand(), + ) + root := MakeRootCommand() root.AddCommand( client, + keyring, MakeStartCommand(), MakeServerDumpCmd(), MakeVersionCommand(), diff --git a/cli/config.go b/cli/config.go index fd275a2d01..fe2a84ca2d 100644 --- a/cli/config.go +++ b/cli/config.go @@ -36,6 +36,7 @@ var configPaths = []string{ "datastore.badger.path", "api.pubkeypath", "api.privkeypath", + "keyring.path", } // configFlags is a mapping of config keys to cli flags to bind to. @@ -57,6 +58,10 @@ var configFlags = map[string]string{ "api.allowed-origins": "allowed-origins", "api.pubkeypath": "pubkeypath", "api.privkeypath": "privkeypath", + "keyring.namespace": "keyring-namespace", + "keyring.backend": "keyring-backend", + "keyring.path": "keyring-path", + "keyring.disabled": "no-keyring", } // defaultConfig returns a new config with default values. 
diff --git a/cli/config_test.go b/cli/config_test.go index 39a17d60fd..3cbd9aa687 100644 --- a/cli/config_test.go +++ b/cli/config_test.go @@ -36,7 +36,6 @@ func TestLoadConfigNotExist(t *testing.T) { require.NoError(t, err) assert.Equal(t, 5, cfg.GetInt("datastore.maxtxnretries")) - assert.Equal(t, filepath.Join(rootdir, "data"), cfg.GetString("datastore.badger.path")) assert.Equal(t, 1<<30, cfg.GetInt("datastore.badger.valuelogfilesize")) assert.Equal(t, "badger", cfg.GetString("datastore.store")) @@ -59,4 +58,9 @@ func TestLoadConfigNotExist(t *testing.T) { assert.Equal(t, false, cfg.GetBool("log.source")) assert.Equal(t, "", cfg.GetString("log.overrides")) assert.Equal(t, false, cfg.GetBool("log.nocolor")) + + assert.Equal(t, filepath.Join(rootdir, "keys"), cfg.GetString("keyring.path")) + assert.Equal(t, false, cfg.GetBool("keyring.disabled")) + assert.Equal(t, "defradb", cfg.GetString("keyring.namespace")) + assert.Equal(t, "file", cfg.GetString("keyring.backend")) } diff --git a/cli/errors.go b/cli/errors.go index 02cd252b59..504cb9ca25 100644 --- a/cli/errors.go +++ b/cli/errors.go @@ -16,6 +16,14 @@ import ( "github.com/sourcenetwork/defradb/errors" ) +const errKeyringHelp = `%w + +Did you forget to initialize the keyring? 
+ +Use the following command to generate the required keys: + defradb keyring generate +` + const ( errInvalidLensConfig string = "invalid lens configuration" errSchemaVersionNotOfSchema string = "the given schema version is from a different schema" @@ -53,3 +61,7 @@ func NewErrSchemaVersionNotOfSchema(schemaRoot string, schemaVersionID string) e errors.NewKV("SchemaVersionID", schemaVersionID), ) } + +func NewErrKeyringHelp(inner error) error { + return fmt.Errorf(errKeyringHelp, inner) +} diff --git a/cli/keyring.go b/cli/keyring.go new file mode 100644 index 0000000000..c60440ccc9 --- /dev/null +++ b/cli/keyring.go @@ -0,0 +1,25 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeKeyringCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "keyring", + Short: "Manage DefraDB private keys", + Long: `Manage DefraDB private keys. +Generate, import, and export private keys.`, + } + return cmd +} diff --git a/cli/keyring_export.go b/cli/keyring_export.go new file mode 100644 index 0000000000..775672fc8a --- /dev/null +++ b/cli/keyring_export.go @@ -0,0 +1,41 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeKeyringExportCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "export ", + Short: "Export a private key", + Long: `Export a private key. +Prints the hexadecimal representation of a private key. + +Example: + defradb keyring export encryption-key`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + keyring, err := openKeyring(cmd) + if err != nil { + return err + } + keyBytes, err := keyring.Get(args[0]) + if err != nil { + return err + } + cmd.Printf("%x\n", keyBytes) + return nil + }, + } + return cmd +} diff --git a/cli/keyring_export_test.go b/cli/keyring_export_test.go new file mode 100644 index 0000000000..8631ff70ab --- /dev/null +++ b/cli/keyring_export_test.go @@ -0,0 +1,51 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "bytes" + "encoding/hex" + "strings" + "testing" + + "github.com/sourcenetwork/defradb/crypto" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKeyringExport(t *testing.T) { + rootdir := t.TempDir() + readPassword = func(_ *cobra.Command, _ string) ([]byte, error) { + return []byte("secret"), nil + } + + keyBytes, err := crypto.GenerateAES256() + require.NoError(t, err) + keyHex := hex.EncodeToString(keyBytes) + + cmd := NewDefraCommand() + cmd.SetArgs([]string{"keyring", "import", "--rootdir", rootdir, encryptionKeyName, keyHex}) + + err = cmd.Execute() + require.NoError(t, err) + + var output bytes.Buffer + cmd.SetOut(&output) + cmd.SetArgs([]string{"keyring", "export", "--rootdir", rootdir, encryptionKeyName}) + + err = cmd.Execute() + require.NoError(t, err) + + actualKeyHex := strings.TrimSpace(output.String()) + assert.Equal(t, keyHex, actualKeyHex) +} diff --git a/cli/keyring_generate.go b/cli/keyring_generate.go new file mode 100644 index 0000000000..f9e073d0d5 --- /dev/null +++ b/cli/keyring_generate.go @@ -0,0 +1,63 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/crypto" +) + +func MakeKeyringGenerateCommand() *cobra.Command { + var noEncryption bool + var cmd = &cobra.Command{ + Use: "generate", + Short: "Generate private keys", + Long: `Generate private keys. +Randomly generate and store private keys in the keyring. + +WARNING: This will overwrite existing keys in the keyring. 
+
+Example:
+  defradb keyring generate
+
+Example: with no encryption key
+  defradb keyring generate --no-encryption-key
+
+Example: with system keyring
+  defradb keyring generate --keyring-backend system`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			keyring, err := openKeyring(cmd)
+			if err != nil {
+				return err
+			}
+			if !noEncryption {
+				// generate optional encryption key
+				encryptionKey, err := crypto.GenerateAES256()
+				if err != nil {
+					return err
+				}
+				err = keyring.Set(encryptionKeyName, encryptionKey)
+				if err != nil {
+					return err
+				}
+			}
+			peerKey, err := crypto.GenerateEd25519()
+			if err != nil {
+				return err
+			}
+			return keyring.Set(peerKeyName, peerKey)
+		},
+	}
+	cmd.Flags().BoolVar(&noEncryption, "no-encryption-key", false,
+		"Skip generating an encryption key. Encryption at rest will be disabled")
+	return cmd
+}
diff --git a/cli/keyring_generate_test.go b/cli/keyring_generate_test.go
new file mode 100644
index 0000000000..672760c2ef
--- /dev/null
+++ b/cli/keyring_generate_test.go
@@ -0,0 +1,52 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/spf13/cobra"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestKeyringGenerate(t *testing.T) {
+	rootdir := t.TempDir()
+	readPassword = func(_ *cobra.Command, _ string) ([]byte, error) {
+		return []byte("secret"), nil
+	}
+
+	cmd := NewDefraCommand()
+	cmd.SetArgs([]string{"keyring", "generate", "--rootdir", rootdir})
+
+	err := cmd.Execute()
+	require.NoError(t, err)
+
+	assert.FileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName))
+	assert.FileExists(t, filepath.Join(rootdir, "keys", peerKeyName))
+}
+
+func TestKeyringGenerateNoEncryptionKey(t *testing.T) {
+	rootdir := t.TempDir()
+	readPassword = func(_ *cobra.Command, _ string) ([]byte, error) {
+		return []byte("secret"), nil
+	}
+
+	cmd := NewDefraCommand()
+	cmd.SetArgs([]string{"keyring", "generate", "--no-encryption-key", "--rootdir", rootdir})
+
+	err := cmd.Execute()
+	require.NoError(t, err)
+
+	assert.NoFileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName))
+	assert.FileExists(t, filepath.Join(rootdir, "keys", peerKeyName))
+}
diff --git a/cli/keyring_import.go b/cli/keyring_import.go
new file mode 100644
index 0000000000..61f80f12a1
--- /dev/null
+++ b/cli/keyring_import.go
@@ -0,0 +1,42 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+	"encoding/hex"
+
+	"github.com/spf13/cobra"
+)
+
+func MakeKeyringImportCommand() *cobra.Command {
+	var cmd = &cobra.Command{
+		Use:   "import <name> <key>",
+		Short: "Import a private key",
+		Long: `Import a private key.
+Store an externally generated key in the keyring.
+ +Example: + defradb keyring import encryption-key 0000000000000000`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + keyring, err := openKeyring(cmd) + if err != nil { + return err + } + keyBytes, err := hex.DecodeString(args[1]) + if err != nil { + return err + } + return keyring.Set(args[0], keyBytes) + }, + } + return cmd +} diff --git a/cli/keyring_import_test.go b/cli/keyring_import_test.go new file mode 100644 index 0000000000..dac907e000 --- /dev/null +++ b/cli/keyring_import_test.go @@ -0,0 +1,42 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/hex" + "path/filepath" + "testing" + + "github.com/sourcenetwork/defradb/crypto" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKeyringImport(t *testing.T) { + rootdir := t.TempDir() + readPassword = func(_ *cobra.Command, _ string) ([]byte, error) { + return []byte("secret"), nil + } + + keyBytes, err := crypto.GenerateAES256() + require.NoError(t, err) + keyHex := hex.EncodeToString(keyBytes) + + cmd := NewDefraCommand() + cmd.SetArgs([]string{"keyring", "import", "--rootdir", rootdir, encryptionKeyName, keyHex}) + + err = cmd.Execute() + require.NoError(t, err) + + assert.FileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName)) +} diff --git a/cli/root.go b/cli/root.go index 8fc8baf628..a974628eed 100644 --- a/cli/root.go +++ b/cli/root.go @@ -139,5 +139,29 @@ Start a DefraDB node, interact with a local or remote node, and much more. 
"Path to the private key for tls", ) + cmd.PersistentFlags().String( + "keyring-namespace", + "defradb", + "Service name to use when using the system backend", + ) + + cmd.PersistentFlags().String( + "keyring-backend", + "file", + "Keyring backend to use. Options are file or system", + ) + + cmd.PersistentFlags().String( + "keyring-path", + "keys", + "Path to store encrypted keys when using the file backend", + ) + + cmd.PersistentFlags().Bool( + "no-keyring", + false, + "Disable the keyring and generate ephemeral keys", + ) + return cmd } diff --git a/cli/start.go b/cli/start.go index 5c0e092c4a..a3d986a0a9 100644 --- a/cli/start.go +++ b/cli/start.go @@ -14,7 +14,6 @@ import ( "fmt" "os" "os/signal" - "path/filepath" "syscall" "github.com/libp2p/go-libp2p/core/peer" @@ -23,6 +22,7 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/internal/db" + "github.com/sourcenetwork/defradb/keyring" "github.com/sourcenetwork/defradb/net" netutils "github.com/sourcenetwork/defradb/net/utils" "github.com/sourcenetwork/defradb/node" @@ -84,23 +84,32 @@ func MakeStartCommand() *cobra.Command { } if cfg.GetString("datastore.store") != configStoreMemory { - // It would be ideal to not have the key path tied to the datastore. - // Running with memory store mode will always generate a random key. - // Adding support for an ephemeral mode and moving the key to the - // config would solve both of these issues. rootDir := mustGetContextRootDir(cmd) - key, err := loadOrGeneratePrivateKey(filepath.Join(rootDir, "data", "key")) - if err != nil { - return err - } - netOpts = append(netOpts, net.WithPrivateKey(key)) - // TODO-ACP: Infuture when we add support for the --no-acp flag when admin signatures are in, // we can allow starting of db without acp. Currently that can only be done programmatically. 
// https://github.com/sourcenetwork/defradb/issues/2271 dbOpts = append(dbOpts, db.WithACP(rootDir)) } + if !cfg.GetBool("keyring.disabled") { + kr, err := openKeyring(cmd) + if err != nil { + return NewErrKeyringHelp(err) + } + // load the required peer key + peerKey, err := kr.Get(peerKeyName) + if err != nil { + return NewErrKeyringHelp(err) + } + netOpts = append(netOpts, net.WithPrivateKey(peerKey)) + // load the optional encryption key + encryptionKey, err := kr.Get(encryptionKeyName) + if err != nil && !errors.Is(err, keyring.ErrNotFound) { + return err + } + storeOpts = append(storeOpts, node.WithEncryptionKey(encryptionKey)) + } + opts := []node.NodeOpt{ node.WithPeers(peers...), node.WithStoreOpts(storeOpts...), diff --git a/cli/utils.go b/cli/utils.go index 105323c33b..97b1f144ff 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -15,15 +15,22 @@ import ( "encoding/json" "os" "path/filepath" + "syscall" - "github.com/libp2p/go-libp2p/core/crypto" "github.com/spf13/cobra" "github.com/spf13/viper" + "golang.org/x/term" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/http" acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/db" + "github.com/sourcenetwork/defradb/keyring" +) + +const ( + peerKeyName = "peer-key" + encryptionKeyName = "encryption-key" ) type contextKey string @@ -42,6 +49,14 @@ var ( colContextKey = contextKey("col") ) +// readPassword reads a user input password without echoing it to the terminal. +var readPassword = func(cmd *cobra.Command, msg string) ([]byte, error) { + cmd.Print(msg) + pass, err := term.ReadPassword(int(syscall.Stdin)) + cmd.Println("") + return pass, err +} + // mustGetContextDB returns the db for the current command context. // // If a db is not set in the current context this function panics. 
@@ -153,44 +168,20 @@ func setContextRootDir(cmd *cobra.Command) error { return nil } -// loadOrGeneratePrivateKey loads the private key from the given path -// or generates a new key and writes it to a file at the given path. -func loadOrGeneratePrivateKey(path string) (crypto.PrivKey, error) { - key, err := loadPrivateKey(path) - if err == nil { - return key, nil - } - if os.IsNotExist(err) { - return generatePrivateKey(path) - } - return nil, err -} - -// generatePrivateKey generates a new private key and writes it -// to a file at the given path. -func generatePrivateKey(path string) (crypto.PrivKey, error) { - key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) - if err != nil { - return nil, err - } - data, err := crypto.MarshalPrivateKey(key) - if err != nil { - return nil, err - } - err = os.MkdirAll(filepath.Dir(path), 0755) - if err != nil { - return nil, err +// openKeyring opens the keyring for the current environment. +func openKeyring(cmd *cobra.Command) (keyring.Keyring, error) { + cfg := mustGetContextConfig(cmd) + if cfg.Get("keyring.backend") == "system" { + return keyring.OpenSystemKeyring(cfg.GetString("keyring.namespace")), nil } - return key, os.WriteFile(path, data, 0644) -} - -// loadPrivateKey reads the private key from the file at the given path. 
-func loadPrivateKey(path string) (crypto.PrivKey, error) { - data, err := os.ReadFile(path) - if err != nil { + path := cfg.GetString("keyring.path") + if err := os.MkdirAll(path, 0755); err != nil { return nil, err } - return crypto.UnmarshalPrivateKey(data) + prompt := keyring.PromptFunc(func(s string) ([]byte, error) { + return readPassword(cmd, s) + }) + return keyring.OpenFileKeyring(path, prompt) } func writeJSON(cmd *cobra.Command, out any) error { diff --git a/crypto/crypto.go b/crypto/crypto.go new file mode 100644 index 0000000000..2e2bc333c1 --- /dev/null +++ b/crypto/crypto.go @@ -0,0 +1,29 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package crypto + +import ( + "crypto/ed25519" + "crypto/rand" +) + +// GenerateAES256 generates a new random AES-256 bit key. +func GenerateAES256() ([]byte, error) { + data := make([]byte, 32) + _, err := rand.Read(data) + return data, err +} + +// GenerateEd25519 generates a new random Ed25519 private key. +func GenerateEd25519() (ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand.Reader) + return priv, err +} diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md index 602206e575..8c26dd86e3 100644 --- a/docs/cli/defradb.md +++ b/docs/cli/defradb.md @@ -14,6 +14,9 @@ Start a DefraDB node, interact with a local or remote node, and much more. ``` --allowed-origins stringArray List of origins to allow for CORS requests -h, --help help for defradb + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -22,6 +25,7 @@ Start a DefraDB node, interact with a local or remote node, and much more. --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to @@ -36,6 +40,7 @@ Start a DefraDB node, interact with a local or remote node, and much more. ### SEE ALSO * [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb keyring](defradb_keyring.md) - Manage DefraDB private keys * [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database * [defradb start](defradb_start.md) - Start a DefraDB node * [defradb version](defradb_version.md) - Display the version information of DefraDB and its components diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md index 302e171dd3..c2a8e31972 100644 --- a/docs/cli/defradb_client.md +++ b/docs/cli/defradb_client.md @@ -19,6 +19,9 @@ Execute queries, add schema types, obtain node info, etc. ``` --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -27,6 +30,7 @@ Execute queries, add schema types, obtain node info, etc. --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_acp.md b/docs/cli/defradb_client_acp.md index d3f57ae230..a81ddea70c 100644 --- a/docs/cli/defradb_client_acp.md +++ b/docs/cli/defradb_client_acp.md @@ -21,6 +21,9 @@ Learn more about [ACP](/acp/README.md) ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -29,6 +32,7 @@ Learn more about [ACP](/acp/README.md) --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_acp_policy.md b/docs/cli/defradb_client_acp_policy.md index 2e659a0eb4..0ae3eb70c6 100644 --- a/docs/cli/defradb_client_acp_policy.md +++ b/docs/cli/defradb_client_acp_policy.md @@ -17,6 +17,9 @@ Interact with the acp policy features of DefraDB instance ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -25,6 +28,7 @@ Interact with the acp policy features of DefraDB instance --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_acp_policy_add.md b/docs/cli/defradb_client_acp_policy_add.md index f426909323..00a78f7b49 100644 --- a/docs/cli/defradb_client_acp_policy_add.md +++ b/docs/cli/defradb_client_acp_policy_add.md @@ -65,6 +65,9 @@ defradb client acp policy add [-i --identity] [policy] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -73,6 +76,7 @@ defradb client acp policy add [-i --identity] [policy] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md index ffa879365c..bf879df080 100644 --- a/docs/cli/defradb_client_backup.md +++ b/docs/cli/defradb_client_backup.md @@ -18,6 +18,9 @@ Currently only supports JSON format. ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -26,6 +29,7 @@ Currently only supports JSON format. 
--log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md index fc05e8ee14..b922576e60 100644 --- a/docs/cli/defradb_client_backup_export.md +++ b/docs/cli/defradb_client_backup_export.md @@ -32,6 +32,9 @@ defradb client backup export [-c --collections | -p --pretty | -f --format] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -32,6 +35,7 @@ defradb client backup import [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_collection.md b/docs/cli/defradb_client_collection.md index 59faa94f78..fa0eda42e8 100644 --- a/docs/cli/defradb_client_collection.md +++ b/docs/cli/defradb_client_collection.md @@ -22,6 +22,9 @@ Create, read, update, and delete documents within a collection. ``` --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -30,6 +33,7 @@ Create, read, update, and delete documents within a collection. 
--log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_collection_create.md b/docs/cli/defradb_client_collection_create.md index b565c2a547..165dc72b54 100644 --- a/docs/cli/defradb_client_collection_create.md +++ b/docs/cli/defradb_client_collection_create.md @@ -39,6 +39,9 @@ defradb client collection create [-i --identity] [flags] --allowed-origins stringArray List of origins to allow for CORS requests --get-inactive Get inactive collections as well as active -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -48,6 +51,7 @@ defradb client collection create [-i --identity] [flags] --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_collection_delete.md b/docs/cli/defradb_client_collection_delete.md index 2bca8d7d8a..c3b2f8cd41 100644 --- a/docs/cli/defradb_client_collection_delete.md +++ b/docs/cli/defradb_client_collection_delete.md @@ -34,6 +34,9 @@ defradb client collection delete [-i --identity] [--filter --docID --docID [flags] --allowed-origins stringArray List of origins to allow for CORS requests --get-inactive Get inactive collections as well as active -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -39,6 +42,7 @@ defradb client collection get [-i --identity] [--show-deleted] [flags] --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_collection_patch.md b/docs/cli/defradb_client_collection_patch.md index c8540aa397..fba7cdcf0a 100644 --- a/docs/cli/defradb_client_collection_patch.md +++ b/docs/cli/defradb_client_collection_patch.md @@ -36,6 +36,9 @@ defradb client collection patch [patch] [flags] --allowed-origins stringArray List of origins to allow for CORS requests --get-inactive Get inactive collections as well as active -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -45,6 +48,7 @@ defradb client collection patch [patch] [flags] --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_collection_update.md b/docs/cli/defradb_client_collection_update.md index ab6b8999b0..cd906be969 100644 --- a/docs/cli/defradb_client_collection_update.md +++ b/docs/cli/defradb_client_collection_update.md @@ -41,6 +41,9 @@ defradb client collection update [-i --identity] [--filter --docID --docID --fields [-n - ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -42,6 +45,7 @@ defradb client index create -c --collection --fields [-n - --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md index 03b206c6cb..68ac4701ed 100644 --- a/docs/cli/defradb_client_index_drop.md +++ b/docs/cli/defradb_client_index_drop.md @@ -26,6 +26,9 @@ defradb client index drop -c --collection -n --name [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -34,6 +37,7 @@ defradb client index drop -c --collection -n --name [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md index 3c776f73ac..bec4c6a005 100644 --- a/docs/cli/defradb_client_index_list.md +++ b/docs/cli/defradb_client_index_list.md @@ -28,6 +28,9 @@ defradb client index list [-c --collection ] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -36,6 +39,7 @@ defradb client index list [-c --collection ] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p.md b/docs/cli/defradb_client_p2p.md index 2506208717..23dae543e3 100644 --- a/docs/cli/defradb_client_p2p.md +++ b/docs/cli/defradb_client_p2p.md @@ -17,6 +17,9 @@ Interact with the DefraDB P2P system ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -25,6 +28,7 @@ Interact with the DefraDB P2P system --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_collection.md b/docs/cli/defradb_client_p2p_collection.md index a1de966445..318c259548 100644 --- a/docs/cli/defradb_client_p2p_collection.md +++ b/docs/cli/defradb_client_p2p_collection.md @@ -18,6 +18,9 @@ The selected collections synchronize their events on the pubsub network. ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -26,6 +29,7 @@ The selected collections synchronize their events on the pubsub network. 
--log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_collection_add.md b/docs/cli/defradb_client_p2p_collection_add.md index 01bc79ca0f..149b69c5a8 100644 --- a/docs/cli/defradb_client_p2p_collection_add.md +++ b/docs/cli/defradb_client_p2p_collection_add.md @@ -29,6 +29,9 @@ defradb client p2p collection add [collectionIDs] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -37,6 +40,7 @@ defradb client p2p collection add [collectionIDs] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_collection_getall.md b/docs/cli/defradb_client_p2p_collection_getall.md index 8d10944ad2..1145bfac75 100644 --- a/docs/cli/defradb_client_p2p_collection_getall.md +++ b/docs/cli/defradb_client_p2p_collection_getall.md @@ -22,6 +22,9 @@ defradb client p2p collection getall [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -30,6 +33,7 @@ defradb client p2p collection getall [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_collection_remove.md b/docs/cli/defradb_client_p2p_collection_remove.md index 1cd6a14ee9..bab6859137 100644 --- a/docs/cli/defradb_client_p2p_collection_remove.md +++ b/docs/cli/defradb_client_p2p_collection_remove.md @@ -29,6 +29,9 @@ defradb client p2p collection remove [collectionIDs] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -37,6 +40,7 @@ defradb client p2p collection remove [collectionIDs] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_info.md b/docs/cli/defradb_client_p2p_info.md index 385780ad3d..b1ebf90922 100644 --- a/docs/cli/defradb_client_p2p_info.md +++ b/docs/cli/defradb_client_p2p_info.md @@ -21,6 +21,9 @@ defradb client p2p info [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -29,6 +32,7 @@ defradb client p2p info [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_replicator.md b/docs/cli/defradb_client_p2p_replicator.md index b9d5b561c7..e88e16d487 100644 --- a/docs/cli/defradb_client_p2p_replicator.md +++ b/docs/cli/defradb_client_p2p_replicator.md @@ -18,6 +18,9 @@ A replicator replicates one or all collection(s) from one node to another. ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -26,6 +29,7 @@ A replicator replicates one or all collection(s) from one node to another. 
--log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_replicator_delete.md b/docs/cli/defradb_client_p2p_replicator_delete.md index 93e5ff6d95..07c4f4eb80 100644 --- a/docs/cli/defradb_client_p2p_replicator_delete.md +++ b/docs/cli/defradb_client_p2p_replicator_delete.md @@ -27,6 +27,9 @@ defradb client p2p replicator delete [-c, --collection] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -35,6 +38,7 @@ defradb client p2p replicator delete [-c, --collection] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_replicator_getall.md b/docs/cli/defradb_client_p2p_replicator_getall.md index cc9cc1ed63..39d41ac3d7 100644 --- a/docs/cli/defradb_client_p2p_replicator_getall.md +++ b/docs/cli/defradb_client_p2p_replicator_getall.md @@ -26,6 +26,9 @@ defradb client p2p replicator getall [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -34,6 +37,7 @@ defradb client p2p replicator getall [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_p2p_replicator_set.md b/docs/cli/defradb_client_p2p_replicator_set.md index 4fbc980a7c..4d8cb7515a 100644 --- a/docs/cli/defradb_client_p2p_replicator_set.md +++ b/docs/cli/defradb_client_p2p_replicator_set.md @@ -27,6 +27,9 @@ defradb client p2p replicator set [-c, --collection] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -35,6 +38,7 @@ defradb client p2p replicator set [-c, --collection] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md index 493acca2d4..f5e0035273 100644 --- a/docs/cli/defradb_client_query.md +++ b/docs/cli/defradb_client_query.md @@ -39,6 +39,9 @@ defradb client query [-i --identity] [request] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -47,6 +50,7 @@ defradb client query [-i --identity] [request] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md index 2e144a89e6..8f5a0896e3 100644 --- a/docs/cli/defradb_client_schema.md +++ b/docs/cli/defradb_client_schema.md @@ -17,6 +17,9 @@ Make changes, updates, or look for existing schema types. ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -25,6 +28,7 @@ Make changes, updates, or look for existing schema types. 
--log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md index 0ff3f683f4..bf06821cdb 100644 --- a/docs/cli/defradb_client_schema_add.md +++ b/docs/cli/defradb_client_schema_add.md @@ -38,6 +38,9 @@ defradb client schema add [schema] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -46,6 +49,7 @@ defradb client schema add [schema] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_describe.md b/docs/cli/defradb_client_schema_describe.md index 0b28a1e64e..8048411b57 100644 --- a/docs/cli/defradb_client_schema_describe.md +++ b/docs/cli/defradb_client_schema_describe.md @@ -37,6 +37,9 @@ defradb client schema describe [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -45,6 +48,7 @@ defradb client schema describe [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md index c339763571..5e815aaff0 100644 --- a/docs/cli/defradb_client_schema_migration.md +++ b/docs/cli/defradb_client_schema_migration.md @@ -17,6 +17,9 @@ Make set or look for existing schema migrations on a DefraDB node. ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -25,6 +28,7 @@ Make set or look for existing schema migrations on a DefraDB node. 
--log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/cli/defradb_client_schema_migration_down.md index f741f5bec9..a64c00a0f5 100644 --- a/docs/cli/defradb_client_schema_migration_down.md +++ b/docs/cli/defradb_client_schema_migration_down.md @@ -34,6 +34,9 @@ defradb client schema migration down --collection [fl ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -42,6 +45,7 @@ defradb client schema migration down --collection [fl --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_migration_reload.md b/docs/cli/defradb_client_schema_migration_reload.md index 8a1d8480c0..1ef1213f56 100644 --- a/docs/cli/defradb_client_schema_migration_reload.md +++ b/docs/cli/defradb_client_schema_migration_reload.md @@ -21,6 +21,9 @@ defradb client schema migration reload [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -29,6 +32,7 @@ defradb client schema migration reload [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_migration_set-registry.md b/docs/cli/defradb_client_schema_migration_set-registry.md index ebb4c625c7..cd5325457d 100644 --- a/docs/cli/defradb_client_schema_migration_set-registry.md +++ b/docs/cli/defradb_client_schema_migration_set-registry.md @@ -27,6 +27,9 @@ defradb client schema migration set-registry [collectionID] [cfg] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -35,6 +38,7 @@ defradb client schema migration set-registry [collectionID] [cfg] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md index 8386fd8369..c0353f9622 100644 --- a/docs/cli/defradb_client_schema_migration_set.md +++ b/docs/cli/defradb_client_schema_migration_set.md @@ -34,6 +34,9 @@ defradb client schema migration set [src] [dst] [cfg] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -42,6 +45,7 @@ defradb client schema migration set [src] [dst] [cfg] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_migration_up.md b/docs/cli/defradb_client_schema_migration_up.md index b55ace45ad..3ce2862447 100644 --- a/docs/cli/defradb_client_schema_migration_up.md +++ b/docs/cli/defradb_client_schema_migration_up.md @@ -34,6 +34,9 @@ defradb client schema migration up --collection [flag ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -42,6 +45,7 @@ defradb client schema migration up --collection [flag --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md index 7d16e632ae..cae05bd26b 100644 --- a/docs/cli/defradb_client_schema_patch.md +++ b/docs/cli/defradb_client_schema_patch.md @@ -37,6 +37,9 @@ defradb client schema patch [schema] [migration] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -45,6 +48,7 @@ defradb client schema patch [schema] [migration] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_schema_set-active.md b/docs/cli/defradb_client_schema_set-active.md index 7f7b4f4cd5..a2e7dd82ad 100644 --- a/docs/cli/defradb_client_schema_set-active.md +++ b/docs/cli/defradb_client_schema_set-active.md @@ -22,6 +22,9 @@ defradb client schema set-active [versionID] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -30,6 +33,7 @@ defradb client schema set-active [versionID] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_tx.md b/docs/cli/defradb_client_tx.md index 67bf63e2df..f7bc69f3ea 100644 --- a/docs/cli/defradb_client_tx.md +++ b/docs/cli/defradb_client_tx.md @@ -17,6 +17,9 @@ Create, commit, and discard DefraDB transactions ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -25,6 +28,7 @@ Create, commit, and discard DefraDB transactions --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_tx_commit.md b/docs/cli/defradb_client_tx_commit.md index eba408dc57..3e854427dd 100644 --- a/docs/cli/defradb_client_tx_commit.md +++ b/docs/cli/defradb_client_tx_commit.md @@ -21,6 +21,9 @@ defradb client tx commit [id] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -29,6 +32,7 @@ defradb client tx commit [id] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_tx_create.md b/docs/cli/defradb_client_tx_create.md index 26668e6ad2..a610a249a9 100644 --- a/docs/cli/defradb_client_tx_create.md +++ b/docs/cli/defradb_client_tx_create.md @@ -23,6 +23,9 @@ defradb client tx create [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -31,6 +34,7 @@ defradb client tx create [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_tx_discard.md b/docs/cli/defradb_client_tx_discard.md index 3989bc4c05..0f856f3855 100644 --- a/docs/cli/defradb_client_tx_discard.md +++ b/docs/cli/defradb_client_tx_discard.md @@ -21,6 +21,9 @@ defradb client tx discard [id] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -29,6 +32,7 @@ defradb client tx discard [id] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_view.md b/docs/cli/defradb_client_view.md index 09c5bab11b..c81fec50e0 100644 --- a/docs/cli/defradb_client_view.md +++ b/docs/cli/defradb_client_view.md @@ -17,6 +17,9 @@ Manage (add) views within a running DefraDB instance ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use.
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -25,6 +28,7 @@ Manage (add) views within a running DefraDB instance --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_client_view_add.md b/docs/cli/defradb_client_view_add.md index b671d8290c..58c0ea4596 100644 --- a/docs/cli/defradb_client_view_add.md +++ b/docs/cli/defradb_client_view_add.md @@ -27,6 +27,9 @@ defradb client view add [query] [sdl] [transform] [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use.
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -35,6 +38,7 @@ defradb client view add [query] [sdl] [transform] [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_keyring.md b/docs/cli/defradb_keyring.md new file mode 100644 index 0000000000..ca26cf1e8b --- /dev/null +++ b/docs/cli/defradb_keyring.md @@ -0,0 +1,49 @@ +## defradb keyring + +Manage DefraDB private keys + +### Synopsis + +Manage DefraDB private keys. +Generate, import, and export private keys. + +### Options + +``` + -h, --help help for keyring +``` + +### Options inherited from parent commands + +``` + --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-no-color Disable colored log output + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) +``` + +### SEE ALSO + +* [defradb](defradb.md) - DefraDB Edge Database +* [defradb keyring export](defradb_keyring_export.md) - Export a private key +* [defradb keyring generate](defradb_keyring_generate.md) - Generate private keys +* [defradb keyring import](defradb_keyring_import.md) - Import a private key + diff --git a/docs/cli/defradb_keyring_export.md b/docs/cli/defradb_keyring_export.md new file mode 100644 index 0000000000..8292ae9c1d --- /dev/null +++ b/docs/cli/defradb_keyring_export.md @@ -0,0 +1,53 @@ +## defradb keyring export + +Export a private key + +### Synopsis + +Export a private key. +Prints the hexadecimal representation of a private key. 
+ +Example: + defradb keyring export encryption-key + +``` +defradb keyring export [flags] +``` + +### Options + +``` + -h, --help help for export +``` + +### Options inherited from parent commands + +``` + --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-no-color Disable colored log output + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) +``` + +### SEE ALSO + +* [defradb keyring](defradb_keyring.md) - Manage DefraDB private keys + diff --git a/docs/cli/defradb_keyring_generate.md b/docs/cli/defradb_keyring_generate.md new file mode 100644 index 0000000000..e7d9a34f47 --- /dev/null +++ b/docs/cli/defradb_keyring_generate.md @@ -0,0 +1,62 @@ +## defradb keyring generate + +Generate private keys + +### Synopsis + +Generate private keys. +Randomly generate and store private keys in the keyring. + +WARNING: This will overwrite existing keys in the keyring. + +Example: + defradb keyring generate + +Example: with no encryption key + defradb keyring generate --no-encryption-key + +Example: with system keyring + defradb keyring generate --keyring-backend system + +``` +defradb keyring generate [flags] +``` + +### Options + +``` + -h, --help help for generate + --no-encryption-key Skip generating an encryption key. Encryption at rest will be disabled +``` + +### Options inherited from parent commands + +``` + --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-no-color Disable colored log output + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) +``` + +### SEE ALSO + +* [defradb keyring](defradb_keyring.md) - Manage DefraDB private keys + diff --git a/docs/cli/defradb_keyring_import.md b/docs/cli/defradb_keyring_import.md new file mode 100644 index 0000000000..0b93048185 --- /dev/null +++ b/docs/cli/defradb_keyring_import.md @@ -0,0 +1,53 @@ +## defradb keyring import + +Import a private key + +### Synopsis + +Import a private key. +Store an externally generated key in the keyring. + +Example: + defradb keyring import encryption-key 0000000000000000 + +``` +defradb keyring import [flags] +``` + +### Options + +``` + -h, --help help for import +``` + +### Options inherited from parent commands + +``` + --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-no-color Disable colored log output + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) +``` + +### SEE ALSO + +* [defradb keyring](defradb_keyring.md) - Manage DefraDB private keys + diff --git a/docs/cli/defradb_server-dump.md b/docs/cli/defradb_server-dump.md index 3651d32e9c..5973f12642 100644 --- a/docs/cli/defradb_server-dump.md +++ b/docs/cli/defradb_server-dump.md @@ -16,6 +16,9 @@ defradb server-dump [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -24,6 +27,7 @@ defradb server-dump [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md index e0f732cb04..a9f9010a17 100644 --- a/docs/cli/defradb_start.md +++ b/docs/cli/defradb_start.md @@ -20,6 +20,9 @@ defradb start [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -28,6 +31,7 @@ defradb start [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/cli/defradb_version.md b/docs/cli/defradb_version.md index b4693fddbf..123441769b 100644 --- a/docs/cli/defradb_version.md +++ b/docs/cli/defradb_version.md @@ -18,6 +18,9 @@ defradb version [flags] ``` --allowed-origins stringArray List of origins to allow for CORS requests + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") --log-format string Log format to use. Options are text or json (default "text") --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") --log-no-color Disable colored log output @@ -26,6 +29,7 @@ defradb version [flags] --log-source Include source location in logs --log-stacktrace Include stacktrace in error and fatal logs --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to diff --git a/docs/config.md b/docs/config.md index da46700bb7..316b6d9aa0 100644 --- a/docs/config.md +++ b/docs/config.md @@ -92,3 +92,22 @@ Logger config overrides. Format `,=,...;,...`. ## `log.nocolor` Disable colored log output. Defaults to `false`. + +## `keyring.path` + +Path to store encrypted key files in. Defaults to `keys`. + +## `keyring.disabled` + +Disable the keyring and generate ephemeral keys instead. Defaults to `false`. + +## `keyring.namespace` + +The service name to use when using the system keyring. Defaults to `defradb`. + +## `keyring.backend` + +Keyring backend to use. Defaults to `file`. 
+ +- `file` Stores keys in encrypted files +- `system` Stores keys in the OS managed keyring diff --git a/go.mod b/go.mod index 9d236c86de..5bcc1757a0 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( github.com/ipfs/go-log/v2 v2.5.1 github.com/jbenet/goprocess v0.1.4 github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c + github.com/lestrrat-go/jwx/v2 v2.0.21 github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-libp2p-gostream v0.6.0 github.com/libp2p/go-libp2p-kad-dht v0.25.2 @@ -44,10 +45,12 @@ require ( github.com/ugorji/go/codec v1.2.12 github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 + github.com/zalando/go-keyring v0.2.4 go.opentelemetry.io/otel/metric v1.26.0 go.opentelemetry.io/otel/sdk/metric v1.26.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/term v0.19.0 google.golang.org/grpc v1.63.2 google.golang.org/protobuf v1.33.0 ) @@ -70,6 +73,7 @@ require ( github.com/DataDog/zstd v1.5.5 // indirect github.com/Jorropo/jsync v1.0.1 // indirect github.com/NathanBaulch/protoc-gen-cobra v1.2.1 // indirect + github.com/alessio/shellescape v1.4.1 // indirect github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -112,7 +116,7 @@ require ( github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emicklei/dot v1.6.1 // indirect github.com/fatih/color v1.15.0 // indirect @@ -130,6 +134,7 @@ require ( github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/swag v0.22.8 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/goccy/go-json 
v0.10.2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/googleapis v1.4.1 // indirect @@ -195,6 +200,11 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/lestrrat-go/blackmagic v1.0.2 // indirect + github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc v1.0.5 // indirect + github.com/lestrrat-go/iter v1.0.2 // indirect + github.com/lestrrat-go/option v1.0.1 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect @@ -261,6 +271,7 @@ require ( github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/segmentio/asm v1.2.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcenetwork/raccoondb v0.2.0 // indirect github.com/sourcenetwork/zanzi v0.3.0 // indirect @@ -295,7 +306,6 @@ require ( golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect diff --git a/go.sum b/go.sum index cbe3eea36c..3043ce7aa4 100644 --- a/go.sum +++ b/go.sum @@ -70,6 +70,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alessio/shellescape v1.4.1 
h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -257,8 +259,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= -github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= +github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -378,8 +380,8 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= -github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= -github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= 
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -696,6 +698,18 @@ github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c/go.mod h1:a4e github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc v1.0.5 h1:bsTfiH8xaKOJPrg1R+E3iE/AWZr/x0Phj9PBTG/OLUk= +github.com/lestrrat-go/httprc v1.0.5/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= +github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= +github.com/lestrrat-go/jwx/v2 v2.0.21 h1:jAPKupy4uHgrHFEdjVjNkUgoBKtVDgrQPB/h55FHrR0= +github.com/lestrrat-go/jwx/v2 v2.0.21/go.mod h1:09mLW8zto6bWL9GbwnqAli+ArLf+5M33QLQPDggkUWM= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= 
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -1009,6 +1023,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -1171,6 +1187,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zalando/go-keyring v0.2.4 h1:wi2xxTqdiwMKbM6TWwi+uJCG/Tum2UV0jqaQhCa9/68= +github.com/zalando/go-keyring v0.2.4/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= diff --git a/keyring/errors.go b/keyring/errors.go new file mode 100644 index 0000000000..724d36612c --- /dev/null +++ b/keyring/errors.go @@ -0,0 +1,16 
@@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keyring + +import "github.com/zalando/go-keyring" + +// ErrNotFound is returned when a keyring item is not found. +var ErrNotFound = keyring.ErrNotFound diff --git a/keyring/file.go b/keyring/file.go new file mode 100644 index 0000000000..61191b3285 --- /dev/null +++ b/keyring/file.go @@ -0,0 +1,94 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keyring + +import ( + "os" + "path/filepath" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe" +) + +var _ Keyring = (*fileKeyring)(nil) + +var keyEncryptionAlgorithm = jwa.PBES2_HS512_A256KW + +// fileKeyring is a keyring that stores keys in encrypted files. +type fileKeyring struct { + // dir is the keystore root directory + dir string + // password is the user defined password used to generate encryption keys + password []byte + // prompt func is used to retrieve the user password + prompt PromptFunc +} + +// PromptFunc is a callback used to retrieve the user's password. +type PromptFunc func(s string) ([]byte, error) + +// OpenFileKeyring opens the keyring in the given directory. 
+func OpenFileKeyring(dir string, prompt PromptFunc) (*fileKeyring, error) { + if err := os.MkdirAll(dir, 0755); err != nil { + return nil, err + } + return &fileKeyring{ + dir: dir, + prompt: prompt, + }, nil +} + +func (f *fileKeyring) Set(name string, key []byte) error { + password, err := f.promptPassword() + if err != nil { + return err + } + cipher, err := jwe.Encrypt(key, jwe.WithKey(keyEncryptionAlgorithm, password)) + if err != nil { + return err + } + return os.WriteFile(filepath.Join(f.dir, name), cipher, 0755) +} + +func (f *fileKeyring) Get(name string) ([]byte, error) { + cipher, err := os.ReadFile(filepath.Join(f.dir, name)) + if os.IsNotExist(err) { + return nil, ErrNotFound + } + password, err := f.promptPassword() + if err != nil { + return nil, err + } + return jwe.Decrypt(cipher, jwe.WithKey(keyEncryptionAlgorithm, password)) +} + +func (f *fileKeyring) Delete(user string) error { + err := os.Remove(filepath.Join(f.dir, user)) + if os.IsNotExist(err) { + return ErrNotFound + } + return err +} + +// promptPassword returns the password from the user. +// +// If the password has been previously prompted it will be remembered. +func (f *fileKeyring) promptPassword() ([]byte, error) { + if len(f.password) > 0 { + return f.password, nil + } + password, err := f.prompt("Enter keystore password:") + if err != nil { + return nil, err + } + f.password = password + return password, nil +} diff --git a/keyring/file_test.go b/keyring/file_test.go new file mode 100644 index 0000000000..f3aa3529b1 --- /dev/null +++ b/keyring/file_test.go @@ -0,0 +1,51 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package keyring + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/zalando/go-keyring" +) + +func TestFileKeyring(t *testing.T) { + prompt := PromptFunc(func(s string) ([]byte, error) { + return []byte("secret"), nil + }) + + kr, err := OpenFileKeyring(t.TempDir(), prompt) + require.NoError(t, err) + + err = kr.Set("peer_key", []byte("abc")) + require.NoError(t, err) + + // password should be remembered + assert.Equal(t, []byte("secret"), kr.password) + + err = kr.Set("node_key", []byte("123")) + require.NoError(t, err) + + peerKey, err := kr.Get("peer_key") + require.NoError(t, err) + assert.Equal(t, []byte("abc"), peerKey) + + nodeKey, err := kr.Get("node_key") + require.NoError(t, err) + assert.Equal(t, []byte("123"), nodeKey) + + err = kr.Delete("node_key") + require.NoError(t, err) + + _, err = kr.Get("node_key") + assert.ErrorIs(t, err, keyring.ErrNotFound) +} diff --git a/keyring/keyring.go b/keyring/keyring.go new file mode 100644 index 0000000000..603c25bb78 --- /dev/null +++ b/keyring/keyring.go @@ -0,0 +1,27 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keyring + +// Keyring provides a simple set/get interface for a keyring service. +type Keyring interface { + // Set stores the given key in the keystore under the given name. + // + // If a key with the given name already exists it will be overriden. + Set(name string, key []byte) error + // Get returns the key with the given name from the keystore. + // + // If a key with the given name does not exist `ErrNotFound` is returned. 
+ Get(name string) ([]byte, error) + // Delete removes the key with the given name from the keystore. + // + // If a key with that name does not exist `ErrNotFound` is returned. + Delete(name string) error +} diff --git a/keyring/system.go b/keyring/system.go new file mode 100644 index 0000000000..81575b5501 --- /dev/null +++ b/keyring/system.go @@ -0,0 +1,55 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keyring + +import ( + "encoding/base64" + + "github.com/zalando/go-keyring" +) + +var _ Keyring = (*systemKeyring)(nil) + +// systemKeyring is a keyring that utilizies the +// built in key management system of the OS. +type systemKeyring struct { + // service is the service name to use when using the system keyring + service string +} + +// OpenSystemKeyring opens the system keyring managed by the OS. 
+func OpenSystemKeyring(service string) *systemKeyring { + return &systemKeyring{ + service: service, + } +} + +func (s *systemKeyring) Set(name string, key []byte) error { + enc := base64.StdEncoding.EncodeToString(key) + return keyring.Set(s.service, name, enc) +} + +func (s *systemKeyring) Get(name string) ([]byte, error) { + enc, err := keyring.Get(s.service, name) + if err != nil { + return nil, err + } + dst := make([]byte, base64.StdEncoding.DecodedLen(len(enc))) + n, err := base64.StdEncoding.Decode(dst, []byte(enc)) + if err != nil { + return nil, err + } + return dst[:n], nil +} + +func (s *systemKeyring) Delete(user string) error { + return keyring.Delete(s.service, user) +} diff --git a/net/config.go b/net/config.go index 040689bbe9..d2a3039d4a 100644 --- a/net/config.go +++ b/net/config.go @@ -13,14 +13,13 @@ package net import ( - "github.com/libp2p/go-libp2p/core/crypto" "google.golang.org/grpc" ) // Options is the node options. type Options struct { ListenAddresses []string - PrivateKey crypto.PrivKey + PrivateKey []byte EnablePubSub bool EnableRelay bool GRPCServerOptions []grpc.ServerOption @@ -39,7 +38,7 @@ func DefaultOptions() *Options { type NodeOpt func(*Options) // WithPrivateKey sets the p2p host private key. 
-func WithPrivateKey(priv crypto.PrivKey) NodeOpt { +func WithPrivateKey(priv []byte) NodeOpt { return func(opt *Options) { opt.PrivateKey = priv } diff --git a/net/node.go b/net/node.go index a52e296712..7683d3fb8f 100644 --- a/net/node.go +++ b/net/node.go @@ -30,7 +30,7 @@ import ( dualdht "github.com/libp2p/go-libp2p-kad-dht/dual" pubsub "github.com/libp2p/go-libp2p-pubsub" record "github.com/libp2p/go-libp2p-record" - "github.com/libp2p/go-libp2p/core/crypto" + libp2pCrypto "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -47,6 +47,7 @@ import ( "github.com/libp2p/go-libp2p/p2p/net/connmgr" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/crypto" ) var evtWaitTimeout = 10 * time.Second @@ -108,19 +109,25 @@ func NewNode( if options.PrivateKey == nil { // generate an ephemeral private key - key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + key, err := crypto.GenerateEd25519() if err != nil { return nil, fin.Cleanup(err) } options.PrivateKey = key } + // unmarshal the private key bytes + privateKey, err := libp2pCrypto.UnmarshalEd25519PrivateKey(options.PrivateKey) + if err != nil { + return nil, fin.Cleanup(err) + } + var ddht *dualdht.DHT libp2pOpts := []libp2p.Option{ libp2p.ConnectionManager(connManager), libp2p.DefaultTransports, - libp2p.Identity(options.PrivateKey), + libp2p.Identity(privateKey), libp2p.ListenAddrs(listenAddresses...), libp2p.Peerstore(peerstore), libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { diff --git a/node/store.go b/node/store.go index 6d05954662..8e15b7aab1 100644 --- a/node/store.go +++ b/node/store.go @@ -74,5 +74,12 @@ func NewStore(opts ...StoreOpt) (datastore.RootStore, error) { badgerOpts.ValueLogFileSize = options.valueLogFileSize badgerOpts.EncryptionKey = options.encryptionKey + if len(options.encryptionKey) > 0 { + // Having a cache improves the 
performance. + // Otherwise, your reads would be very slow while encryption is enabled. + // https://dgraph.io/docs/badger/get-started/#encryption-mode + badgerOpts.IndexCacheSize = 100 << 20 + } + return badger.NewDatastore(options.path, &badgerOpts) } diff --git a/tests/integration/db.go b/tests/integration/db.go index ff5fb0060c..d565030b3e 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -20,6 +20,7 @@ import ( badger "github.com/sourcenetwork/badger/v4" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/crypto" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/internal/db" @@ -29,10 +30,11 @@ import ( type DatabaseType string const ( - memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" - fileBadgerEnvName = "DEFRA_BADGER_FILE" - fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" - inMemoryEnvName = "DEFRA_IN_MEMORY" + memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" + fileBadgerEnvName = "DEFRA_BADGER_FILE" + fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" + badgerEncryptionEnvName = "DEFRA_BADGER_ENCRYPTION" + inMemoryEnvName = "DEFRA_IN_MEMORY" ) const ( @@ -42,10 +44,12 @@ const ( ) var ( - badgerInMemory bool - badgerFile bool - inMemoryStore bool - databaseDir string + badgerInMemory bool + badgerFile bool + inMemoryStore bool + databaseDir string + badgerEncryption bool + encryptionKey []byte ) func init() { @@ -54,6 +58,7 @@ func init() { badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName)) badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName)) inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName)) + badgerEncryption, _ = strconv.ParseBool(os.Getenv(badgerEncryptionEnvName)) if changeDetector.Enabled { // Change detector only uses badger file db type. 
@@ -72,6 +77,10 @@ func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, err opts := badgerds.Options{ Options: badger.DefaultOptions("").WithInMemory(true), } + if encryptionKey != nil { + opts.Options.EncryptionKey = encryptionKey + opts.Options.IndexCacheSize = 100 << 20 + } rootstore, err := badgerds.NewDatastore("", &opts) if err != nil { return nil, err @@ -112,7 +121,10 @@ func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (cl opts := &badgerds.Options{ Options: badger.DefaultOptions(dbPath), } - + if encryptionKey != nil { + opts.Options.EncryptionKey = encryptionKey + opts.Options.IndexCacheSize = 100 << 20 + } rootstore, err := badgerds.NewDatastore(dbPath, opts) if err != nil { return nil, "", err @@ -136,6 +148,14 @@ func setupDatabase(s *state) (impl client.DB, path string, err error) { db.WithLensPoolSize(lensPoolSize), } + if badgerEncryption && encryptionKey == nil { + key, err := crypto.GenerateAES256() + if err != nil { + return nil, "", err + } + encryptionKey = key + } + switch s.dbt { case badgerIMType: impl, err = NewBadgerMemoryDB(s.ctx, dbopts...) 
diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index b12b8cef29..3e6923b52c 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -22,13 +22,13 @@ import ( "github.com/bxcodec/faker/support/slice" "github.com/fxamacker/cbor/v2" - "github.com/libp2p/go-libp2p/core/crypto" "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/crypto" "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" @@ -195,6 +195,7 @@ func executeTestCase( corelog.Any("client", clientType), corelog.Any("mutationType", mutationType), corelog.String("databaseDir", databaseDir), + corelog.Bool("badgerEncryption", badgerEncryption), corelog.Bool("skipNetworkTests", skipNetworkTests), corelog.Bool("changeDetector.Enabled", changeDetector.Enabled), corelog.Bool("changeDetector.SetupOnly", changeDetector.SetupOnly), @@ -788,7 +789,7 @@ func configureNode( db, path, err := setupDatabase(s) //disable change dector, or allow it? 
require.NoError(s.t, err) - privateKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + privateKey, err := crypto.GenerateEd25519() require.NoError(s.t, err) nodeOpts := action() From 575ea2ae3a57e8b9add7f8f686a7158bbdff7440 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 03:37:45 -0400 Subject: [PATCH 07/78] bot: Update dependencies (bulk dependabot PRs) 05-14-2024 (#2617) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by combining the following PRs: #2613 bot: Bump golang.org/x/term from 0.19.0 to 0.20.0 #2612 bot: Bump github.com/gofrs/uuid/v5 from 5.1.0 to 5.2.0 #2611 bot: Bump swagger-ui-react from 5.17.2 to 5.17.8 in /playground #2610 bot: Bump graphiql from 3.2.0 to 3.2.2 in /playground #2609 bot: Bump eslint-plugin-react-refresh from 0.4.6 to 0.4.7 in /playground #2608 bot: Bump @types/react from 18.3.1 to 18.3.2 in /playground #2588 bot: Bump github.com/libp2p/go-libp2p-pubsub from 0.10.0 to 0.10.1 #2587 bot: Bump vite from 5.2.10 to 5.2.11 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 +- go.sum | 16 +- playground/package-lock.json | 871 ++++++++++++++--------------------- playground/package.json | 10 +- 4 files changed, 356 insertions(+), 549 deletions(-) diff --git a/go.mod b/go.mod index 5bcc1757a0..9b725e6d61 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/go-chi/chi/v5 v5.0.12 github.com/go-chi/cors v1.2.1 github.com/go-errors/errors v1.5.1 - github.com/gofrs/uuid/v5 v5.1.0 + github.com/gofrs/uuid/v5 v5.2.0 github.com/iancoleman/strcase v0.3.0 github.com/ipfs/boxo v0.19.0 github.com/ipfs/go-block-format v0.2.0 @@ -26,7 +26,7 @@ require ( github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-libp2p-gostream v0.6.0 github.com/libp2p/go-libp2p-kad-dht v0.25.2 - 
github.com/libp2p/go-libp2p-pubsub v0.10.0 + github.com/libp2p/go-libp2p-pubsub v0.10.1 github.com/libp2p/go-libp2p-record v0.2.0 github.com/multiformats/go-multiaddr v0.12.3 github.com/multiformats/go-multibase v0.2.0 @@ -50,7 +50,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.26.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/term v0.19.0 + golang.org/x/term v0.20.0 google.golang.org/grpc v1.63.2 google.golang.org/protobuf v1.33.0 ) @@ -305,7 +305,7 @@ require ( golang.org/x/mod v0.15.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.19.0 // indirect + golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect diff --git a/go.sum b/go.sum index 3043ce7aa4..cbf4919926 100644 --- a/go.sum +++ b/go.sum @@ -388,8 +388,8 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid/v5 v5.1.0 h1:S5rqVKIigghZTCBKPCw0Y+bXkn26K3TB5mvQq2Ix8dk= -github.com/gofrs/uuid/v5 v5.1.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= +github.com/gofrs/uuid/v5 v5.2.0 h1:qw1GMx6/y8vhVsx626ImfKMuS5CvJmhIKKtuyvfajMM= +github.com/gofrs/uuid/v5 v5.2.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= @@ -728,8 +728,8 @@ github.com/libp2p/go-libp2p-kad-dht v0.25.2 
h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0Trt github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= -github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA= -github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= +github.com/libp2p/go-libp2p-pubsub v0.10.1 h1:/RqOZpEtAolsr8/9CC8KqROJSOZeu7lK7fPftn4MwNg= +github.com/libp2p/go-libp2p-pubsub v0.10.1/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY= @@ -1414,15 +1414,15 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod 
h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/playground/package-lock.json b/playground/package-lock.json index 8bb2cafd7b..f77bb3002d 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -8,14 +8,14 @@ "name": "playground", "version": "0.0.0", "dependencies": { - "graphiql": "^3.2.0", + "graphiql": "^3.2.2", "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.2" + "swagger-ui-react": "^5.17.8" }, "devDependencies": { - "@types/react": "^18.3.1", + "@types/react": "^18.3.2", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.7.1", @@ -23,9 +23,9 @@ "@vitejs/plugin-react-swc": "^3.6.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", - "eslint-plugin-react-refresh": "^0.4.6", + "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.10" + "vite": "^5.2.11" } }, "node_modules/@babel/runtime": { @@ -557,9 +557,9 @@ } }, "node_modules/@floating-ui/dom": { - "version": "1.6.4", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.4.tgz", - "integrity": "sha512-0G8R+zOvQsAG1pg2Q99P21jiqxqGBW1iRe/iXHsBRBxnpXKFI8QwbB4x5KmYLggNO5m34IQgOIu9SCRfR/WWiQ==", + "version": "1.6.5", + "resolved": 
"https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.5.tgz", + "integrity": "sha512-Nsdud2X65Dz+1RHjAIP0t8z5e2ff/IRbei6BqFrl1urT8sDVzM1HMQ+R0XcU5ceRfyO3I6ayeqIfh+6Wb8LGTw==", "dependencies": { "@floating-ui/core": "^1.0.0", "@floating-ui/utils": "^0.2.0" @@ -583,9 +583,9 @@ "integrity": "sha512-J4yDIIthosAsRZ5CPYP/jQvUAQtlZTTD/4suA08/FEnlxqW3sKS9iAhgsa9VYLZ6vDHn/ixJgIqRQPotoBjxIw==" }, "node_modules/@graphiql/react": { - "version": "0.21.0", - "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.21.0.tgz", - "integrity": "sha512-UlXzG78HC5+CgQYXw0jVZPoZX0Uh2jPIrqLBIxAdAWMZsmcHMZHAujZtION1pbIrv22cWxP95W+8RpDIHijYow==", + "version": "0.22.1", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.22.1.tgz", + "integrity": "sha512-PBClhO2juCvVvmE5qD4PHivJLkhp0dqIX1zgId8Z83UCKpxO2M+bEspRL9aOQQaE4F4xqExCUk5B2AL+wc+agg==", "dependencies": { "@graphiql/toolkit": "^0.9.1", "@headlessui/react": "^1.7.15", @@ -1654,30 +1654,30 @@ ] }, "node_modules/@swagger-api/apidom-ast": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-0.99.1.tgz", - "integrity": "sha512-evkKm2JaqNfg3dB2Yk3FWL/Qy2r4csZLMZ9bHMG+xNpti8ulENHMjuCh3Ry4koV1gD7IA54CU2ZjcaTvqJa22Q==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-0.99.2.tgz", + "integrity": "sha512-poNlXWAU2XBl192+lo5sC6loB3qGvwK30V1pta6Hs200KeTayVsMMRL4R6wDDYEtsbv7M3vQaFKcRGbYUk/SgA==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", "@swagger-api/apidom-error": "^0.99.0", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "unraw": "^3.0.0" } }, "node_modules/@swagger-api/apidom-core": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-0.99.1.tgz", - "integrity": "sha512-oWU9Re2B7hPFAnm4ymN2HNOqevMqZsvL4Fjud2qN+KFWNvZ1/r8kwQaj0Pba5Kwka2bcWo0aEfWNayP4axTB+Q==", + 
"version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-0.99.2.tgz", + "integrity": "sha512-deudG9eCxqgPnZyIcZzpmDxF0cja0hdPFS2hB0Op6aB4TKc9mOP1+1iEIDI3Tlx/nzgIayyAl1bblyhK3yH5fQ==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.1", + "@swagger-api/apidom-ast": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", "@types/ramda": "~0.29.6", "minim": "~0.23.8", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "short-unique-id": "^5.0.2", "stampit": "^4.3.2" } @@ -1691,409 +1691,409 @@ } }, "node_modules/@swagger-api/apidom-json-pointer": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-0.99.1.tgz", - "integrity": "sha512-4fOOKTLoBWpfX2eGNx93sqBsS1KRCtBFOq75n1jMcRbs1rrj+JxcaiTFUE+6BZqIqBsCqTmRMYE/HsgwBS3vhQ==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-0.99.2.tgz", + "integrity": "sha512-bZENmE3H2si1yP38VLUAdhoMWNxkh98+/dCOESaw3R5zXHG04di3ShbYsCG0StkigF+eCfCdaj6XoikQOGSkiA==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-ns-api-design-systems": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-0.99.1.tgz", - "integrity": "sha512-LID3n+Y2eKBzaR7oYShto48+EFPBLZLuKIJdEZ53is6SqD5jHS0Ev6xLj2QfqSIQR3OoVN3PUOrz724Jkpiv/A==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-0.99.2.tgz", + "integrity": 
"sha512-854ioZ/FB5DNiJcMinD9/a6dj6h/poOsKcb4POhPTzMSM0fHLIQUp//Ufhx7qL6qsepwtLapkgZ3/hAYN7lnBg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.1", + "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-asyncapi-2": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-0.99.1.tgz", - "integrity": "sha512-fAUsKbg0MuvEPjE2UWQu+62K0eh/3yTE2M5u/QCqpj48IpByMNYLKU9ICfMMAzBjXNQAVuEr07/UgY9CRHUVhA==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-0.99.2.tgz", + "integrity": "sha512-HF38kCszKYQqhQ6VMEMqd5r7gPGBRpHwPcoYaRJSDeOST/qLLG78xpoCJKQEyL3PQprea0gXKz1LG1uslDHgtQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-json-schema-draft-7": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-json-schema-draft-7": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-4": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-0.99.1.tgz", - "integrity": "sha512-HdxD4WXnaMJsdodrWoynzgteg9UDaZsVkX04oObQPR3C1ZWW9KahEGBSbtr/oBhnE/QgiPfNHUDWrQvk3oC6lg==", + "version": "0.99.2", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-0.99.2.tgz", + "integrity": "sha512-vgCRaqDLI/SmTECZeKO47RGFFx6MCpOcbSm60sV0/ZJxeK+TgkNjIRJTyuRQNts44K863CWgY+bwzzn1zhNqUg==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.1", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-ast": "^0.99.2", + "@swagger-api/apidom-core": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", + "ts-mixer": "^6.0.4" } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-6": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-0.99.1.tgz", - "integrity": "sha512-O6A25j9y+Hjvwwq8x+uTaIhK4tp0CqO6YrFRXmfmOnkBtJ6Q66jqbvRzIN9XQfW8VaIipqAlOin++ufsfuDd1g==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-0.99.2.tgz", + "integrity": "sha512-ayKGsd65a6p/k4s5L2el+vMoMi8kc/bLXVszWszFDET1eZNvhKwEMLylGzKMfnwAFgpj+kJOKn4MZsD6PK6U/A==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", + "ts-mixer": "^6.0.4" } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-7": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-0.99.1.tgz", - "integrity": 
"sha512-I4IpTkAlParfUWOi5kJU7jQqeMKy39JOWiRz8jTyPoZ8vvixVgyIlOS7/bj5uLxbBw3QxOFXPuIqUvK1uFElAg==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-0.99.2.tgz", + "integrity": "sha512-Rn2YeQKxj6hSijQAzGRRxMYDRIedqHjE69z9xigVbvm+iDXxLJIwasuzFa7BIMRDZF5eAJkBPHXTiU9cXVsl6w==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-6": "^0.99.1", + "@swagger-api/apidom-ns-json-schema-draft-6": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", - "stampit": "^4.3.2" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", + "ts-mixer": "^6.0.4" } }, "node_modules/@swagger-api/apidom-ns-openapi-2": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-0.99.1.tgz", - "integrity": "sha512-ChEd1RaJKrYskLTmlH8NL9tNpAgroSPklTwJCvHmZjzaWvW7N/B2diHBOaz+rnVLiW9Hb7QOlR/biEXJn7OUIg==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-0.99.2.tgz", + "integrity": "sha512-4YlBvMkxSJIWrOQmsHiVuQ2VkbcWgUnOm7uiRq+8d88ur9mKI5XbP5iUvxCASuONmCqlaSU2+qoM1qesy73XPw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-openapi-3-0": { - "version": "0.99.1", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-0.99.1.tgz", - "integrity": "sha512-9lfa2a+4rLp+1loEXrr+Dq3whdBwBWHukctsX/C/cGr4SG0NO8+tmS3FLsOD+ly6O/YPdszPDxVcIqqNV8J2uA==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-0.99.2.tgz", + "integrity": "sha512-fcT597Ty3kqTkoBr1jeZ3Lfbu0a+CKd1l2ojY6RBF/5+dWNux+CRZ9qosax2XZbN+nJhSdvGLLvGvuKaV3Ybug==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-openapi-3-1": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-0.99.1.tgz", - "integrity": "sha512-XsRxM9WC+WywBo+rr/YUayQRsV2mN8AzBxVlKzJoZ+pBgmPYe24n3Ma/0FTr8zGwQyg4DtOBwydlYz8QFrLPFA==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-0.99.2.tgz", + "integrity": "sha512-ubO8vi1dYpIV2a3IKhTkBCf125udoCeUZIc9wrhOFwwHHIKeInGR5L6yxlNhOQm0/doYCth77vEqcuTBpxaIrw==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.1", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.1", + "@swagger-api/apidom-ast": "^0.99.2", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-workflows-1": { - 
"version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-0.99.1.tgz", - "integrity": "sha512-s6SmFzlBmKKRdlyLdZsjXHYJ+7+AuDyK3qrBAPHX7mDe/uN6D7QPGD05oCzHytPhbeZQPMf0wi9vPUrM1s1xvw==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-0.99.2.tgz", + "integrity": "sha512-lm8G7cbCRXukN4UOb/bPszUiSbvN1ymvwQ2PEkyZN+DzJvYfgRuAxXt7xd2EDKJcxeH4igpAnkKoIoBoSOHg+w==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-json": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-0.99.1.tgz", - "integrity": "sha512-ONeGsOZPZ16SvYbfHKiLjg8IeKGg+nJC+fOIqnelGdMCu/34ed0X7k6XQZGrwbDtmSd3SkXykL3F55H5BFiUPQ==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-0.99.2.tgz", + "integrity": "sha512-7WPbiUJEWggVmxsssFfW/8JGk8Yu4C9ELneh805kMsgl/DOm6hcHxqT5gXXSwamH0ZQlTmSnHl2OZSlG+U5KKQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-api-design-systems": "^0.99.1", - "@swagger-api/apidom-parser-adapter-json": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-api-design-systems": "^0.99.2", + "@swagger-api/apidom-parser-adapter-json": "^0.99.2", "@types/ramda": "~0.29.6", - 
"ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-yaml": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-0.99.1.tgz", - "integrity": "sha512-mVOHebofGhI3E8HW/7YsqGOpIWOBSMc5R5aQFMYMYpTxrpDHNhyEfFEWqZRAoC2Hin9NZ2BeI/hsrXGIw/LoeQ==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-0.99.2.tgz", + "integrity": "sha512-ezOA1fjBAQPQ5X0DGYnuFyZMBSBCsaT6k9KDRr7B37Do9yj8YKa/lTlg5usXOrcLm4VgcyJGTKhAJi9kfzCKcA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-api-design-systems": "^0.99.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-api-design-systems": "^0.99.2", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-json-2": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-0.99.1.tgz", - "integrity": "sha512-2kKVf5ecTuDirPpk8nDRyTrT0tkrWjdaUPwJ/+l2RdgWYObNVwdX2lAS9URC4zK/drdQOQxjetF+aDQBBhXmXA==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-0.99.2.tgz", + "integrity": "sha512-b1ncaIc4dD0FGqty3iRCDUA/uHdd7nH271C06blQ+S9Id4D/xXxzd84z8LeNIJNLhCcnueuMKgUkGzvXP+raAA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": 
"^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-asyncapi-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-json": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-asyncapi-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-json": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-0.99.1.tgz", - "integrity": "sha512-UX+rLOUSQuWe5yNXS8eLFvDhCA1CP5r80jLtvT3n0FDnss4+9WkPlqgj4UPH4XoitXSvBVOZxbdjNwfKtJzsHA==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-0.99.2.tgz", + "integrity": "sha512-NuwuwdORyZPhEpxwyEgslyGfVnwIuyDvF5TDT0cLCMOIFDqbE/n77c4FAh/nQUARDEXRthiDb5pdMo/+rOxjFg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-asyncapi-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-asyncapi-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-json": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-0.99.1.tgz", - "integrity": "sha512-qVeSdhaDIggIkFtMI4aqqv4MYuJlRQ6pniP+Li+DjcHeTKYHelX0OwoznaTlLlZ1tM9QFaMi8rw8xfGp6vMHgg==", + "version": "0.99.2", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-0.99.2.tgz", + "integrity": "sha512-wy2WF71bLX1wEJkgmPRCEnXicV155KCelPQhCtzAGGo/B3+OuhknovBWXZNStvoJqZ/2A4a5pvYrgHoVoIKchg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.1", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-ast": "^0.99.2", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "tree-sitter": "=0.20.4", "tree-sitter-json": "=0.20.2", "web-tree-sitter": "=0.20.3" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-2": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-0.99.1.tgz", - "integrity": "sha512-aHzdast9HMeGTaTUWwVovMcspEVCAdvBJe47BzMZfzcVOnZlAVyTmLqxQ/3s9fjseRrPhFYqKtCOKROzbWeAhg==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-0.99.2.tgz", + "integrity": "sha512-z+ATszNWaO2JlixM9h4QpTAW2fE5nPCY4IDcScuWbch8gtKBmv61+53nahYb7tc3W/X0mMqhc1LyTCy5QC2L/w==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-json": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-json": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-0": { - "version": "0.99.1", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-0.99.1.tgz", - "integrity": "sha512-l/nYccP87GL611W9OCiYWUOizhhoGenuKa7Ocmaf9Rg+xIDnPw29+9p/SuGEN2jjtql0iYuNI4+ZzwiC2+teSg==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-0.99.2.tgz", + "integrity": "sha512-78PFDsF67tWDjPCGAD9cNHage8p5Vs2+zili1AF2zch3JkJA/KxBt+5va4A8w1fYaUaXi8LnMkM8VvEIAsNaOw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.1", - "@swagger-api/apidom-parser-adapter-json": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", + "@swagger-api/apidom-parser-adapter-json": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-1": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-0.99.1.tgz", - "integrity": "sha512-Eie4ztKR5hgrGESBDHB9xIODTB/gvjWBwPNveZ/iSlJ/yhZGyDMC8dgv0aQiyFP01mKaaBMhyZjWgsvts9l+cQ==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-0.99.2.tgz", + "integrity": "sha512-WQmm14C0EH0dcMzvgrGPeLkWKXyFwyunK9rrRt7xRLn8sL1Em0dC31hiVdgypo3DLrz9YW3PStpSQjEedJaWUQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-json": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", + 
"@swagger-api/apidom-parser-adapter-json": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-2": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-0.99.1.tgz", - "integrity": "sha512-MzjUyhGmJ+jQly90Nak7s01x2Jp1GvBe+Z8BXwkArNOFjLvzQIjdAx7F943/VlLaV9y71DNXVsqhgKdiqjnX3w==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-0.99.2.tgz", + "integrity": "sha512-rEoE54T8KKRxtdxXgvaYba+GX8853mwcw5nzdrrvOy2tNKqsJANPeJcrQmjVYqJX7SU0HuZPK3zBvyqMyKoNsg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-0.99.1.tgz", - "integrity": "sha512-TF/yquy1Alce/olQzR5AnjnOx7o7q8MkXMi0JxrtqvMk9Ky//0qFxFGzFQEzA++NaSGt9StG0Pcgp4MGZAzJYg==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-0.99.2.tgz", + "integrity": "sha512-l7ve45cfAj+imE8flypjdo49zpfp0m29stpOO/q2fCD5/46wT3Z4Ve3aKhil8/TRFEX26VOKoYVNjpeUWzUMaw==", "optional": true, "dependencies": { 
"@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-0.99.1.tgz", - "integrity": "sha512-baXbKqjnbmgEmFgCVHlDEiFANHs5lHnnBM0X3k5kNtAVule6Lc5lAZVoySpTGyBJ+4nq4RHNJfbKW8RDHgVMoQ==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-0.99.2.tgz", + "integrity": "sha512-1ab06o/M6MAJ0Js4C1bifpj/R0T0mw26Qk4dR7qKzel9dDuEkIRMQF7JHnf2pojZE+aR59Eb4iAMKmxzokHZdA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-json-1": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-0.99.1.tgz", - "integrity": "sha512-Uu8SaQfl2XiiXDQVRUvUCu3yk7jwHVmwKOoacbJGzPducrR/7/bOe8dNeN4CMRw7HKeRbh02UxXtR46mgBPnog==", + "version": "0.99.2", 
+ "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-0.99.2.tgz", + "integrity": "sha512-VsFVmwTX/OfsXyBmIEp5Y+adqBF4Cj/cM/55KPM3mIEmKbc+PK3M08TIotMk1FdCiTafe+I28OZL+WMVujNm1A==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-workflows-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-json": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-workflows-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-json": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-yaml-1": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-0.99.1.tgz", - "integrity": "sha512-9DX9X9wxW6TJF5lG0k/w0GxeMPkHACwEQx/QFJqg1YRD3/UWSkBcm567KbfCh5BiDx5p5WAYhTGInQEAF3d0zQ==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-0.99.2.tgz", + "integrity": "sha512-yK+48YcllFc8mY711ZJ7uTfPVZmJdujIHbvGLOMxMODmETkZlEjfoTAwNTWvutcuA6cxK70tKUD8vz5572ALQA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", - "@swagger-api/apidom-ns-workflows-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", + "@swagger-api/apidom-ns-workflows-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.0.0" + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-yaml-1-2": { - "version": "0.99.1", - 
"resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-0.99.1.tgz", - "integrity": "sha512-MmTDUkrvFIg2AwzaZmiqBifWpoECh7AKeJcAD8Tm+G2/FUmGr3mIr7elc4ehYt/fecSSJEwFGNFU/radKqT/6g==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-0.99.2.tgz", + "integrity": "sha512-eU6Rd58WzzcOYOajwp9UCURhXVO8SUCrau14W6BuF1DbJCr85FmOigy4yu2b9UWsK44ZPzH8KeyhSYwTkqkgLA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.1", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-ast": "^0.99.2", + "@swagger-api/apidom-core": "^0.99.2", "@swagger-api/apidom-error": "^0.99.0", "@types/ramda": "~0.29.6", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "tree-sitter": "=0.20.4", "tree-sitter-yaml": "=0.5.0", "web-tree-sitter": "=0.20.3" } }, "node_modules/@swagger-api/apidom-reference": { - "version": "0.99.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-0.99.1.tgz", - "integrity": "sha512-g7xp+ZL/iRX6CEwdUnqqsLfZmaSRlXwEZV8LF1k4k13/o7Qcf7bsPv0fOVGa8ZC29zM8k//FVavwWoXvT2xrFQ==", + "version": "0.99.2", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-0.99.2.tgz", + "integrity": "sha512-QwAnCCEUbicPAVPWYOOpSI8rcj2e7TTybn1chGfdogV+NMLprGXBk/A86hO9CaSLMXkCA2rERUznSNSZWC996g==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.1", + "@swagger-api/apidom-core": "^0.99.2", "@types/ramda": "~0.29.6", "axios": "^1.4.0", "minimatch": "^7.4.3", "process": "^0.11.10", - "ramda": "~0.29.1", - "ramda-adjunct": "^4.1.1", + "ramda": "~0.30.0", + "ramda-adjunct": "^5.0.0", "stampit": "^4.3.2" }, "optionalDependencies": { "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-json-pointer": 
"^0.99.1", - "@swagger-api/apidom-ns-asyncapi-2": "^0.99.1", - "@swagger-api/apidom-ns-openapi-2": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.1", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.1", - "@swagger-api/apidom-ns-workflows-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^0.99.1", - "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^0.99.1", - "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-json": "^0.99.1", - "@swagger-api/apidom-parser-adapter-openapi-json-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^0.99.1", - "@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-openapi-yaml-2": "^0.99.1", - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^0.99.1", - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-workflows-json-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-workflows-yaml-1": "^0.99.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.1" + "@swagger-api/apidom-json-pointer": "^0.99.2", + "@swagger-api/apidom-ns-asyncapi-2": "^0.99.2", + "@swagger-api/apidom-ns-openapi-2": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", + "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", + "@swagger-api/apidom-ns-workflows-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^0.99.2", + "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^0.99.2", + "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-json": "^0.99.2", + "@swagger-api/apidom-parser-adapter-openapi-json-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^0.99.2", + 
"@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-openapi-yaml-2": "^0.99.2", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^0.99.2", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-workflows-json-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-workflows-yaml-1": "^0.99.2", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2" } }, "node_modules/@swagger-api/apidom-reference/node_modules/minimatch": { @@ -2390,9 +2390,9 @@ } }, "node_modules/@types/react": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.1.tgz", - "integrity": "sha512-V0kuGBX3+prX+DQ/7r2qsv1NsdfnCLnTgnRJ1pYnxykBhGMz+qj+box5lq7XsO5mtZsBqpjwwTu/7wszPfMBcw==", + "version": "18.3.2", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.2.tgz", + "integrity": "sha512-Btgg89dAnqD4vV7R3hlwOxgqobUQKgx3MmrQRi0yYbs/P0ym8XozIAlkqVilPqHQwXs4e9Tf63rrCgl58BcO4w==", "devOptional": true, "dependencies": { "@types/prop-types": "*", @@ -2649,11 +2649,6 @@ "vite": "^4 || ^5" } }, - "node_modules/@yarnpkg/lockfile": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", - "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==" - }, "node_modules/acorn": { "version": "8.11.3", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", @@ -2704,6 +2699,7 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, "dependencies": { "color-convert": "^2.0.1" }, @@ -2714,6 +2710,11 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/apg-lite": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/apg-lite/-/apg-lite-1.0.3.tgz", + "integrity": "sha512-lOoNkL7vN7PGdyQMFPey1aok2oVVqvs3n7UMFBRvQ9FoELSbKhgPc3rd7JptaGwCmo4125gLX9Cqb8ElvLCFaQ==" + }, "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -2744,14 +2745,6 @@ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, - "node_modules/at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "engines": { - "node": ">= 4.0.0" - } - }, "node_modules/autolinker": { "version": "3.16.2", "resolved": "https://registry.npmjs.org/autolinker/-/autolinker-3.16.2.tgz", @@ -2817,6 +2810,7 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, "dependencies": { "fill-range": "^7.0.1" }, @@ -2879,6 +2873,7 @@ "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2923,20 +2918,6 @@ "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", "optional": true }, - "node_modules/ci-info": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", - "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - 
} - ], - "engines": { - "node": ">=8" - } - }, "node_modules/classnames": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", @@ -2986,6 +2967,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, "dependencies": { "color-name": "~1.1.4" }, @@ -2996,7 +2978,8 @@ "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true }, "node_modules/combined-stream": { "version": "1.0.8", @@ -3021,7 +3004,8 @@ "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true }, "node_modules/cookie": { "version": "0.6.0", @@ -3053,6 +3037,7 @@ "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -3190,9 +3175,9 @@ } }, "node_modules/dompurify": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.1.0.tgz", - "integrity": "sha512-yoU4rhgPKCo+p5UrWWWNKiIq+ToGqmVVhk0PmMYBK4kRsR3/qhemNFL8f6CFmBd4gMwm3F4T7HBoydP5uY07fA==" + "version": "3.1.2", + "resolved": 
"https://registry.npmjs.org/dompurify/-/dompurify-3.1.2.tgz", + "integrity": "sha512-hLGGBI1tw5N8qTELr3blKjAML/LY4ANxksbS612UiJyDfyf/2D092Pvm+S7pmeTGJRqvlJkFzBoHBQKgQlOQVg==" }, "node_modules/drange": { "version": "1.1.1", @@ -3359,9 +3344,9 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.6", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.6.tgz", - "integrity": "sha512-NjGXdm7zgcKRkKMua34qVO9doI7VOxZ6ancSvBELJSSoX97jyndXcSoa8XBh69JoB31dNz3EEzlMcizZl7LaMA==", + "version": "0.4.7", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.7.tgz", + "integrity": "sha512-yrj+KInFmwuQS2UQcg1SF83ha1tuHC1jMQbRNyuWtlEzzKRDgAl7L4Yp4NlDUZTZNlWvHEzOtJhMi40R7JxcSw==", "dev": true, "peerDependencies": { "eslint": ">=7" @@ -3573,6 +3558,7 @@ "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -3596,14 +3582,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/find-yarn-workspace-root": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz", - "integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==", - "dependencies": { - "micromatch": "^4.0.2" - } - }, "node_modules/flat-cache": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", @@ -3698,24 +3676,11 @@ "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", "optional": true }, - "node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": 
"sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true }, "node_modules/fsevents": { "version": "2.3.3", @@ -3775,6 +3740,7 @@ "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -3806,6 +3772,7 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -3815,6 +3782,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -3868,11 +3836,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" - }, "node_modules/graphemer": { "version": "1.4.0", "resolved": 
"https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", @@ -3880,11 +3843,11 @@ "dev": true }, "node_modules/graphiql": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.2.0.tgz", - "integrity": "sha512-HHZ9j47IVUdUhdEdOkwD/U3kMGxCGZocEf9rk1aou5lInK9vJRbjlDW4BbG9CvA5fNoe7DevRr72tv0ubvjjPA==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.2.2.tgz", + "integrity": "sha512-Tpv9gz9/xfOCJq2RTU/ByPgCFkh3ftN16xmcJxNms3j7C0eJ9z7xg6J0lASGGJ6mTeIW9myEI98SJBPL1c4vcA==", "dependencies": { - "@graphiql/react": "^0.21.0", + "@graphiql/react": "^0.22.1", "@graphiql/toolkit": "^0.9.1", "graphql-language-service": "^5.2.0", "markdown-it": "^14.1.0" @@ -3922,6 +3885,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, "engines": { "node": ">=8" } @@ -4073,6 +4037,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -4128,20 +4093,6 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -4176,6 +4127,7 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", 
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, "engines": { "node": ">=0.12.0" } @@ -4208,26 +4160,11 @@ "node": ">=0.10.0" } }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" - }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true }, "node_modules/isobject": { "version": "3.0.1", @@ -4270,48 +4207,12 @@ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, - "node_modules/json-stable-stringify": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.1.1.tgz", - "integrity": "sha512-SU/971Kt5qVQfJpyDveVhQ/vya+5hvrjClFOcr8c0Fq5aODJjMwutrOfCU+eCnVD5gpx1Q3fEqkyom77zH1iIg==", - "dependencies": { - "call-bind": "^1.0.5", - "isarray": "^2.0.5", - "jsonify": "^0.0.1", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": 
"https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonify": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.1.tgz", - "integrity": "sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/keyv": { "version": "4.5.4", "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", @@ -4321,14 +4222,6 @@ "json-buffer": "3.0.1" } }, - "node_modules/klaw-sync": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", - "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", - "dependencies": { - "graceful-fs": "^4.1.11" - } - }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -4409,6 +4302,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "devOptional": true, "dependencies": { "yallist": "^4.0.0" }, @@ -4466,6 +4360,7 @@ "version": "4.0.5", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": 
true, "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" @@ -4535,6 +4430,7 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "optional": true, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -4659,35 +4555,24 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "engines": { - "node": ">= 0.4" - } - }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "devOptional": true, "dependencies": { "wrappy": "1" } }, - "node_modules/open": { - "version": "7.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", - "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", + "node_modules/openapi-path-templating": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/openapi-path-templating/-/openapi-path-templating-1.5.1.tgz", + "integrity": "sha512-kgRHToVP571U1YzUnaZnWaUIygon2itg5g96kwaFIi8bnpsw4oXYOk7k59Ivn+ley1iQnMENe/1HSovpPVZuXA==", "dependencies": { - "is-docker": "^2.0.0", - "is-wsl": "^2.1.1" + "apg-lite": "^1.0.3" }, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=12.20.0" } }, "node_modules/optionator": { @@ -4707,14 +4592,6 @@ "node": ">= 0.8.0" } }, - "node_modules/os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": 
"sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/p-limit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", @@ -4774,54 +4651,6 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/patch-package": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/patch-package/-/patch-package-8.0.0.tgz", - "integrity": "sha512-da8BVIhzjtgScwDJ2TtKsfT5JFWz1hYoBl9rUQ1f38MC2HwnEIkK8VN3dKMKcP7P7bvvgzNDbfNHtx3MsQb5vA==", - "dependencies": { - "@yarnpkg/lockfile": "^1.1.0", - "chalk": "^4.1.2", - "ci-info": "^3.7.0", - "cross-spawn": "^7.0.3", - "find-yarn-workspace-root": "^2.0.0", - "fs-extra": "^9.0.0", - "json-stable-stringify": "^1.0.2", - "klaw-sync": "^6.0.0", - "minimist": "^1.2.6", - "open": "^7.4.2", - "rimraf": "^2.6.3", - "semver": "^7.5.3", - "slash": "^2.0.0", - "tmp": "^0.0.33", - "yaml": "^2.2.2" - }, - "bin": { - "patch-package": "index.js" - }, - "engines": { - "node": ">=14", - "npm": ">5" - } - }, - "node_modules/patch-package/node_modules/rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - } - }, - "node_modules/patch-package/node_modules/slash": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", - "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", - "engines": { - "node": ">=6" - } - }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -4835,6 +4664,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", 
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, "engines": { "node": ">=0.10.0" } @@ -4843,6 +4673,7 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, "engines": { "node": ">=8" } @@ -4866,6 +4697,7 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, "engines": { "node": ">=8.6" }, @@ -5057,18 +4889,18 @@ ] }, "node_modules/ramda": { - "version": "0.29.1", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.29.1.tgz", - "integrity": "sha512-OfxIeWzd4xdUNxlWhgFazxsA/nl3mS4/jGZI5n00uWOoSSFRhC1b6gl6xvmzUamgmqELraWp0J/qqVlXYPDPyA==", + "version": "0.30.0", + "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.30.0.tgz", + "integrity": "sha512-13Y0iMhIQuAm/wNGBL/9HEqIfRGmNmjKnTPlKWfA9f7dnDkr8d45wQ+S7+ZLh/Pq9PdcGxkqKUEA7ySu1QSd9Q==", "funding": { "type": "opencollective", "url": "https://opencollective.com/ramda" } }, "node_modules/ramda-adjunct": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/ramda-adjunct/-/ramda-adjunct-4.1.1.tgz", - "integrity": "sha512-BnCGsZybQZMDGram9y7RiryoRHS5uwx8YeGuUeDKuZuvK38XO6JJfmK85BwRWAKFA6pZ5nZBO/HBFtExVaf31w==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ramda-adjunct/-/ramda-adjunct-5.0.0.tgz", + "integrity": "sha512-iEehjqp/ZGjYZybZByDaDu27c+79SE7rKDcySLdmjAwKWkz6jNhvGgZwzUGaMsij8Llp9+1N1Gy0drpAq8ZSyA==", "engines": { "node": ">=0.10.3" }, @@ -5077,7 +4909,7 @@ "url": "https://opencollective.com/ramda-adjunct" }, "peerDependencies": { - "ramda": ">= 0.29.0" + "ramda": ">= 0.30.0" } }, "node_modules/randexp": { @@ -5205,6 +5037,28 @@ "resolved": 
"https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, + "node_modules/react-redux": { + "version": "9.1.2", + "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.1.2.tgz", + "integrity": "sha512-0OA4dhM1W48l3uzmv6B7TXPCGmokUU4p1M44DGN2/D9a1FjVPukVjER1PcPX97jIg6aUeLq1XJo1IpfbgULn0w==", + "dependencies": { + "@types/use-sync-external-store": "^0.0.3", + "use-sync-external-store": "^1.0.0" + }, + "peerDependencies": { + "@types/react": "^18.2.25", + "react": "^18.0", + "redux": "^5.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "redux": { + "optional": true + } + } + }, "node_modules/react-remove-scroll": { "version": "2.5.5", "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", @@ -5513,6 +5367,7 @@ "version": "7.6.0", "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "devOptional": true, "dependencies": { "lru-cache": "^6.0.0" }, @@ -5586,6 +5441,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, "dependencies": { "shebang-regex": "^3.0.0" }, @@ -5597,14 +5453,15 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, "engines": { "node": ">=8" } }, "node_modules/short-unique-id": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/short-unique-id/-/short-unique-id-5.1.1.tgz", - "integrity": 
"sha512-qqisAdcWLXSTNK2MKXI66ldHpTKWv+5c28TPG//8Tv9mwC2UL/J/w2EsJaPzVxVRTmoBc4KwGIuZiz58wButfA==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/short-unique-id/-/short-unique-id-5.2.0.tgz", + "integrity": "sha512-cMGfwNyfDZ/nzJ2k2M+ClthBIh//GlZl1JEf47Uoa9XR11bz8Pa2T2wQO4bVrRdH48LrIDWJahQziKo3MjhsWg==", "bin": { "short-unique-id": "bin/short-unique-id", "suid": "bin/short-unique-id" @@ -5761,6 +5618,7 @@ "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, "dependencies": { "has-flag": "^4.0.0" }, @@ -5769,9 +5627,9 @@ } }, "node_modules/swagger-client": { - "version": "3.27.2", - "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.27.2.tgz", - "integrity": "sha512-7dVtvyCXmpHXmv5xgS5DyAyxN17l75qmxN8BCNb/z3sj+kYDsxwJeJP3X6enPyxtZsMZFDMxC+EtiFbml7pS6Q==", + "version": "3.27.8", + "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.27.8.tgz", + "integrity": "sha512-2wrqNHdMhgQzBM4xjxNDPvinysQ1lc9wHqMJ/HyllyzRyEKL96KyEfP8laI8G1gGbO/vmdtTDEXPFDHp1RrOHQ==", "dependencies": { "@babel/runtime-corejs3": "^7.22.15", "@swagger-api/apidom-core": ">=0.99.1 <1.0.0", @@ -5786,7 +5644,9 @@ "js-yaml": "^4.1.0", "node-abort-controller": "^3.1.1", "node-fetch-commonjs": "^3.3.2", + "openapi-path-templating": "^1.5.1", "qs": "^6.10.2", + "ramda-adjunct": "^5.0.0", "traverse": "=0.6.8" } }, @@ -5799,23 +5659,22 @@ } }, "node_modules/swagger-ui-react": { - "version": "5.17.2", - "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.2.tgz", - "integrity": "sha512-jwhKQ0IdM1t77clbJ9EorL7+6B5Sr1mG+ryqSELxT5MaG4y3yOIyFbZ0Xn/EnSyRuww/V8FTK/0KIX3gf41taw==", + "version": "5.17.8", + "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.8.tgz", + "integrity": 
"sha512-Afr/Svo1nGlmfClo+PYUUZftIS66uRtPsmLb/5AdSaJUJO9FqXpDb8lTk0MWLd3pTzNRxaxzbSMeDi+P65M0pw==", "dependencies": { - "@babel/runtime-corejs3": "^7.24.4", + "@babel/runtime-corejs3": "^7.24.5", "@braintree/sanitize-url": "=7.0.1", "base64-js": "^1.5.1", "classnames": "^2.5.1", "css.escape": "1.5.1", "deep-extend": "0.6.0", - "dompurify": "=3.1.0", + "dompurify": "=3.1.2", "ieee754": "^1.2.1", "immutable": "^3.x.x", "js-file-download": "^0.4.12", "js-yaml": "=4.1.0", "lodash": "^4.17.21", - "patch-package": "^8.0.0", "prop-types": "^15.8.1", "randexp": "^0.5.3", "randombytes": "^2.1.0", @@ -5824,7 +5683,7 @@ "react-immutable-proptypes": "2.2.0", "react-immutable-pure-component": "^2.2.0", "react-inspector": "^6.0.1", - "react-redux": "^9.1.1", + "react-redux": "^9.1.2", "react-syntax-highlighter": "^15.5.0", "redux": "^5.0.1", "redux-immutable": "^4.0.0", @@ -5832,7 +5691,7 @@ "reselect": "^5.1.0", "serialize-error": "^8.1.0", "sha.js": "^2.4.11", - "swagger-client": "^3.27.2", + "swagger-client": "^3.27.8", "url-parse": "^1.5.10", "xml": "=1.0.1", "xml-but-prettier": "^1.0.1", @@ -5843,32 +5702,6 @@ "react-dom": ">=16.8.0 <19" } }, - "node_modules/swagger-ui-react/node_modules/react-redux": { - "version": "9.1.1", - "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.1.1.tgz", - "integrity": "sha512-5ynfGDzxxsoV73+4czQM56qF43vsmgJsO22rmAvU5tZT2z5Xow/A2uhhxwXuGTxgdReF3zcp7A80gma2onRs1A==", - "dependencies": { - "@types/use-sync-external-store": "^0.0.3", - "use-sync-external-store": "^1.0.0" - }, - "peerDependencies": { - "@types/react": "^18.2.25", - "react": "^18.0", - "react-native": ">=0.69", - "redux": "^5.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "react-native": { - "optional": true - }, - "redux": { - "optional": true - } - } - }, "node_modules/tar-fs": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", @@ -5903,21 +5736,11 @@ "integrity": 
"sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true }, - "node_modules/tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dependencies": { - "os-tmpdir": "~1.0.2" - }, - "engines": { - "node": ">=0.6.0" - } - }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, "dependencies": { "is-number": "^7.0.0" }, @@ -6060,14 +5883,6 @@ "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz", "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==" }, - "node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/unraw": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/unraw/-/unraw-3.0.0.tgz", @@ -6147,9 +5962,9 @@ "optional": true }, "node_modules/vite": { - "version": "5.2.10", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.10.tgz", - "integrity": "sha512-PAzgUZbP7msvQvqdSD+ErD5qGnSFiGOoWmV5yAKUEI0kdhjbH6nMWVyZQC/hSc4aXwc0oJ9aEdIiF9Oje0JFCw==", + "version": "5.2.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.11.tgz", + "integrity": "sha512-HndV31LWW05i1BLPMUCE1B9E9GFbOu1MbenhS58FuK6owSO5qHm7GiCotrNY1YE5rMeQSFBGmT5ZaLEjFizgiQ==", "dev": true, "dependencies": { "esbuild": "^0.20.1", @@ -6230,6 +6045,7 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", 
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, "dependencies": { "isexe": "^2.0.0" }, @@ -6252,7 +6068,8 @@ "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "devOptional": true }, "node_modules/xml": { "version": "1.0.1", @@ -6278,18 +6095,8 @@ "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" - }, - "node_modules/yaml": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.4.2.tgz", - "integrity": "sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA==", - "bin": { - "yaml": "bin.mjs" - }, - "engines": { - "node": ">= 14" - } + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "devOptional": true }, "node_modules/yocto-queue": { "version": "0.1.0", diff --git a/playground/package.json b/playground/package.json index 845369d2c6..1b9ddf71c7 100644 --- a/playground/package.json +++ b/playground/package.json @@ -10,14 +10,14 @@ "preview": "vite preview" }, "dependencies": { - "graphiql": "^3.2.0", + "graphiql": "^3.2.2", "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.2" + "swagger-ui-react": "^5.17.8" }, "devDependencies": { - "@types/react": "^18.3.1", + "@types/react": "^18.3.2", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.7.1", @@ -25,8 +25,8 @@ "@vitejs/plugin-react-swc": "^3.6.0", 
"eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", - "eslint-plugin-react-refresh": "^0.4.6", + "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.10" + "vite": "^5.2.11" } } From 530f21e9f584c0dbb90c79a09ac28b43b7d1a0ae Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Tue, 14 May 2024 10:00:45 -0700 Subject: [PATCH 08/78] refactor: Reorganize global CLI flags (#2615) ## Relevant issue(s) Resolves #2614 Resolves #2582 ## Description This PR moves the global CLI flags that only apply to the start command. It also fixes the log color config issue above. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
`make test` Specify the platform(s) on which this was tested: - MacOS --- cli/config.go | 73 +++++----- cli/config_test.go | 8 +- cli/root.go | 134 +++++------------- cli/start.go | 56 +++++++- cli/utils.go | 3 +- docs/cli/defradb.md | 37 ++--- docs/cli/defradb_client.md | 35 ++--- docs/cli/defradb_client_acp.md | 39 ++--- docs/cli/defradb_client_acp_policy.md | 39 ++--- docs/cli/defradb_client_acp_policy_add.md | 39 ++--- docs/cli/defradb_client_backup.md | 39 ++--- docs/cli/defradb_client_backup_export.md | 39 ++--- docs/cli/defradb_client_backup_import.md | 39 ++--- docs/cli/defradb_client_collection.md | 35 ++--- docs/cli/defradb_client_collection_create.md | 47 +++--- docs/cli/defradb_client_collection_delete.md | 47 +++--- .../cli/defradb_client_collection_describe.md | 39 ++--- docs/cli/defradb_client_collection_docIDs.md | 47 +++--- docs/cli/defradb_client_collection_get.md | 47 +++--- docs/cli/defradb_client_collection_patch.md | 47 +++--- docs/cli/defradb_client_collection_update.md | 47 +++--- docs/cli/defradb_client_dump.md | 39 ++--- docs/cli/defradb_client_index.md | 39 ++--- docs/cli/defradb_client_index_create.md | 39 ++--- docs/cli/defradb_client_index_drop.md | 39 ++--- docs/cli/defradb_client_index_list.md | 39 ++--- docs/cli/defradb_client_p2p.md | 39 ++--- docs/cli/defradb_client_p2p_collection.md | 39 ++--- docs/cli/defradb_client_p2p_collection_add.md | 39 ++--- .../defradb_client_p2p_collection_getall.md | 39 ++--- .../defradb_client_p2p_collection_remove.md | 39 ++--- docs/cli/defradb_client_p2p_info.md | 39 ++--- docs/cli/defradb_client_p2p_replicator.md | 39 ++--- .../defradb_client_p2p_replicator_delete.md | 39 ++--- .../defradb_client_p2p_replicator_getall.md | 39 ++--- docs/cli/defradb_client_p2p_replicator_set.md | 39 ++--- docs/cli/defradb_client_query.md | 39 ++--- docs/cli/defradb_client_schema.md | 39 ++--- docs/cli/defradb_client_schema_add.md | 39 ++--- docs/cli/defradb_client_schema_describe.md | 39 ++--- 
docs/cli/defradb_client_schema_migration.md | 39 ++--- .../defradb_client_schema_migration_down.md | 39 ++--- .../defradb_client_schema_migration_reload.md | 39 ++--- ...db_client_schema_migration_set-registry.md | 39 ++--- .../defradb_client_schema_migration_set.md | 39 ++--- .../cli/defradb_client_schema_migration_up.md | 39 ++--- docs/cli/defradb_client_schema_patch.md | 39 ++--- docs/cli/defradb_client_schema_set-active.md | 39 ++--- docs/cli/defradb_client_tx.md | 39 ++--- docs/cli/defradb_client_tx_commit.md | 39 ++--- docs/cli/defradb_client_tx_create.md | 39 ++--- docs/cli/defradb_client_tx_discard.md | 39 ++--- docs/cli/defradb_client_view.md | 39 ++--- docs/cli/defradb_client_view_add.md | 39 ++--- docs/cli/defradb_keyring.md | 35 ++--- docs/cli/defradb_keyring_export.md | 35 ++--- docs/cli/defradb_keyring_generate.md | 35 ++--- docs/cli/defradb_keyring_import.md | 35 ++--- docs/cli/defradb_server-dump.md | 35 ++--- docs/cli/defradb_start.md | 38 ++--- docs/cli/defradb_version.md | 35 ++--- docs/config.md | 2 +- 62 files changed, 988 insertions(+), 1485 deletions(-) diff --git a/cli/config.go b/cli/config.go index fe2a84ca2d..089a1b074f 100644 --- a/cli/config.go +++ b/cli/config.go @@ -11,6 +11,7 @@ package cli import ( + "errors" "os" "path/filepath" "strings" @@ -39,29 +40,29 @@ var configPaths = []string{ "keyring.path", } -// configFlags is a mapping of config keys to cli flags to bind to. +// configFlags is a mapping of cli flag names to config keys to bind. 
var configFlags = map[string]string{ - "log.level": "log-level", - "log.output": "log-output", - "log.format": "log-format", - "log.stacktrace": "log-stacktrace", - "log.source": "log-source", - "log.overrides": "log-overrides", - "log.nocolor": "log-no-color", - "api.address": "url", - "datastore.maxtxnretries": "max-txn-retries", - "datastore.store": "store", - "datastore.badger.valuelogfilesize": "valuelogfilesize", - "net.peers": "peers", - "net.p2paddresses": "p2paddr", - "net.p2pdisabled": "no-p2p", - "api.allowed-origins": "allowed-origins", - "api.pubkeypath": "pubkeypath", - "api.privkeypath": "privkeypath", - "keyring.namespace": "keyring-namespace", - "keyring.backend": "keyring-backend", - "keyring.path": "keyring-path", - "keyring.disabled": "no-keyring", + "log-level": "log.level", + "log-output": "log.output", + "log-format": "log.format", + "log-stacktrace": "log.stacktrace", + "log-source": "log.source", + "log-overrides": "log.overrides", + "no-log-color": "log.colordisabled", + "url": "api.address", + "max-txn-retries": "datastore.maxtxnretries", + "store": "datastore.store", + "valuelogfilesize": "datastore.badger.valuelogfilesize", + "peers": "net.peers", + "p2paddr": "net.p2paddresses", + "no-p2p": "net.p2pdisabled", + "allowed-origins": "api.allowed-origins", + "pubkeypath": "api.pubkeypath", + "privkeypath": "api.privkeypath", + "keyring-namespace": "keyring.namespace", + "keyring-backend": "keyring.backend", + "keyring-path": "keyring.path", + "no-keyring": "keyring.disabled", } // defaultConfig returns a new config with default values. @@ -84,11 +85,11 @@ func defaultConfig() *viper.Viper { } // createConfig writes the default config file if one does not exist. 
-func createConfig(rootdir string, flags *pflag.FlagSet) error { +func createConfig(rootdir string) error { cfg := defaultConfig() cfg.AddConfigPath(rootdir) - if err := bindConfigFlags(cfg, flags); err != nil { + if err := bindConfigFlags(cfg); err != nil { return err } // make sure rootdir exists @@ -106,7 +107,7 @@ func createConfig(rootdir string, flags *pflag.FlagSet) error { } // loadConfig returns a new config with values from the config in the given rootdir. -func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) { +func loadConfig(rootdir string) (*viper.Viper, error) { cfg := defaultConfig() cfg.AddConfigPath(rootdir) @@ -119,7 +120,7 @@ func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) { return nil, err } // bind cli flags to config keys - if err := bindConfigFlags(cfg, flags); err != nil { + if err := bindConfigFlags(cfg); err != nil { return nil, err } @@ -131,13 +132,14 @@ func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) { } } - // set default logging config + // set logging config corelog.SetConfig(corelog.Config{ Level: cfg.GetString("log.level"), Format: cfg.GetString("log.format"), Output: cfg.GetString("log.output"), EnableStackTrace: cfg.GetBool("log.stacktrace"), EnableSource: cfg.GetBool("log.source"), + DisableColor: cfg.GetBool("log.colordisabled"), }) // set logging config overrides @@ -147,12 +149,13 @@ func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) { } // bindConfigFlags binds the set of cli flags to config values. 
-func bindConfigFlags(cfg *viper.Viper, flags *pflag.FlagSet) error { - for key, flag := range configFlags { - err := cfg.BindPFlag(key, flags.Lookup(flag)) - if err != nil { - return err - } - } - return nil +func bindConfigFlags(cfg *viper.Viper) error { + var errs []error + rootFlags.VisitAll(func(f *pflag.Flag) { + errs = append(errs, cfg.BindPFlag(configFlags[f.Name], f)) + }) + startFlags.VisitAll(func(f *pflag.Flag) { + errs = append(errs, cfg.BindPFlag(configFlags[f.Name], f)) + }) + return errors.Join(errs...) } diff --git a/cli/config_test.go b/cli/config_test.go index 3cbd9aa687..8cc5f62bdb 100644 --- a/cli/config_test.go +++ b/cli/config_test.go @@ -20,11 +20,11 @@ import ( func TestCreateConfig(t *testing.T) { rootdir := t.TempDir() - err := createConfig(rootdir, NewDefraCommand().PersistentFlags()) + err := createConfig(rootdir) require.NoError(t, err) // ensure no errors when config already exists - err = createConfig(rootdir, NewDefraCommand().PersistentFlags()) + err = createConfig(rootdir) require.NoError(t, err) assert.FileExists(t, filepath.Join(rootdir, "config.yaml")) @@ -32,7 +32,7 @@ func TestCreateConfig(t *testing.T) { func TestLoadConfigNotExist(t *testing.T) { rootdir := t.TempDir() - cfg, err := loadConfig(rootdir, NewDefraCommand().PersistentFlags()) + cfg, err := loadConfig(rootdir) require.NoError(t, err) assert.Equal(t, 5, cfg.GetInt("datastore.maxtxnretries")) @@ -57,7 +57,7 @@ func TestLoadConfigNotExist(t *testing.T) { assert.Equal(t, false, cfg.GetBool("log.stacktrace")) assert.Equal(t, false, cfg.GetBool("log.source")) assert.Equal(t, "", cfg.GetString("log.overrides")) - assert.Equal(t, false, cfg.GetBool("log.nocolor")) + assert.Equal(t, false, cfg.GetBool("log.colordisabled")) assert.Equal(t, filepath.Join(rootdir, "keys"), cfg.GetString("keyring.path")) assert.Equal(t, false, cfg.GetBool("keyring.disabled")) diff --git a/cli/root.go b/cli/root.go index a974628eed..37488f9549 100644 --- a/cli/root.go +++ b/cli/root.go @@ 
-12,156 +12,98 @@ package cli import ( "github.com/spf13/cobra" + "github.com/spf13/pflag" ) -func MakeRootCommand() *cobra.Command { - var cmd = &cobra.Command{ - SilenceUsage: true, - Use: "defradb", - Short: "DefraDB Edge Database", - Long: `DefraDB is the edge database to power the user-centric future. +// rootFlags is a set of persistent flags that are bound to config values. +var rootFlags = pflag.NewFlagSet("root", pflag.ContinueOnError) -Start a DefraDB node, interact with a local or remote node, and much more. -`, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := setContextRootDir(cmd); err != nil { - return err - } - return setContextConfig(cmd) - }, - } - - cmd.PersistentFlags().String( +func init() { + rootFlags.String( "rootdir", "", "Directory for persistent data (default: $HOME/.defradb)", ) - - cmd.PersistentFlags().String( + rootFlags.String( "log-level", "info", "Log level to use. Options are debug, info, error, fatal", ) - - cmd.PersistentFlags().String( + rootFlags.String( "log-output", "stderr", "Log output path. Options are stderr or stdout.", ) - - cmd.PersistentFlags().String( + rootFlags.String( "log-format", "text", "Log format to use. Options are text or json", ) - - cmd.PersistentFlags().Bool( + rootFlags.Bool( "log-stacktrace", false, "Include stacktrace in error and fatal logs", ) - - cmd.PersistentFlags().Bool( + rootFlags.Bool( "log-source", false, "Include source location in logs", ) - - cmd.PersistentFlags().String( + rootFlags.String( "log-overrides", "", "Logger config overrides. 
Format ,=,...;,...", ) - - cmd.PersistentFlags().Bool( - "log-no-color", + rootFlags.Bool( + "no-log-color", false, "Disable colored log output", ) - - cmd.PersistentFlags().String( + rootFlags.String( "url", "127.0.0.1:9181", "URL of HTTP endpoint to listen on or connect to", ) - - cmd.PersistentFlags().StringArray( - "peers", - []string{}, - "List of peers to connect to", - ) - - cmd.PersistentFlags().Int( - "max-txn-retries", - 5, - "Specify the maximum number of retries per transaction", - ) - - cmd.PersistentFlags().String( - "store", - "badger", - "Specify the datastore to use (supported: badger, memory)", - ) - - cmd.PersistentFlags().Int( - "valuelogfilesize", - 1<<30, - "Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize", - ) - - cmd.PersistentFlags().StringSlice( - "p2paddr", - []string{"/ip4/127.0.0.1/tcp/9171"}, - "Listen addresses for the p2p network (formatted as a libp2p MultiAddr)", - ) - - cmd.PersistentFlags().Bool( - "no-p2p", - false, - "Disable the peer-to-peer network synchronization system", - ) - - cmd.PersistentFlags().StringArray( - "allowed-origins", - []string{}, - "List of origins to allow for CORS requests", - ) - - cmd.PersistentFlags().String( - "pubkeypath", - "", - "Path to the public key for tls", - ) - - cmd.PersistentFlags().String( - "privkeypath", - "", - "Path to the private key for tls", - ) - - cmd.PersistentFlags().String( + rootFlags.String( "keyring-namespace", "defradb", "Service name to use when using the system backend", ) - - cmd.PersistentFlags().String( + rootFlags.String( "keyring-backend", "file", "Keyring backend to use. 
Options are file or system", ) - - cmd.PersistentFlags().String( + rootFlags.String( "keyring-path", "keys", "Path to store encrypted keys when using the file backend", ) - - cmd.PersistentFlags().Bool( + rootFlags.Bool( "no-keyring", false, "Disable the keyring and generate ephemeral keys", ) +} + +func MakeRootCommand() *cobra.Command { + var cmd = &cobra.Command{ + SilenceUsage: true, + Use: "defradb", + Short: "DefraDB Edge Database", + Long: `DefraDB is the edge database to power the user-centric future. + +Start a DefraDB node, interact with a local or remote node, and much more. +`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := setContextRootDir(cmd); err != nil { + return err + } + return setContextConfig(cmd) + }, + } + + cmd.PersistentFlags().AddFlagSet(rootFlags) return cmd } diff --git a/cli/start.go b/cli/start.go index a3d986a0a9..c371475064 100644 --- a/cli/start.go +++ b/cli/start.go @@ -18,6 +18,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/spf13/cobra" + "github.com/spf13/pflag" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/http" @@ -28,6 +29,57 @@ import ( "github.com/sourcenetwork/defradb/node" ) +// startFlags is a set of persistent flags that are bound to config values. +var startFlags = pflag.NewFlagSet("start", pflag.ContinueOnError) + +func init() { + startFlags.StringArray( + "peers", + []string{}, + "List of peers to connect to", + ) + startFlags.Int( + "max-txn-retries", + 5, + "Specify the maximum number of retries per transaction", + ) + startFlags.String( + "store", + "badger", + "Specify the datastore to use (supported: badger, memory)", + ) + startFlags.Int( + "valuelogfilesize", + 1<<30, + "Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize", + ) + startFlags.StringSlice( + "p2paddr", + []string{"/ip4/127.0.0.1/tcp/9171"}, + "Listen addresses for the p2p network (formatted as a libp2p MultiAddr)", + ) + startFlags.Bool( + "no-p2p", + false, + "Disable the peer-to-peer network synchronization system", + ) + startFlags.StringArray( + "allowed-origins", + []string{}, + "List of origins to allow for CORS requests", + ) + startFlags.String( + "pubkeypath", + "", + "Path to the public key for tls", + ) + startFlags.String( + "privkeypath", + "", + "Path to the private key for tls", + ) +} + func MakeStartCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "start", @@ -39,7 +91,7 @@ func MakeStartCommand() *cobra.Command { return err } rootdir := mustGetContextRootDir(cmd) - if err := createConfig(rootdir, cmd.Root().PersistentFlags()); err != nil { + if err := createConfig(rootdir); err != nil { return err } return setContextConfig(cmd) @@ -149,5 +201,7 @@ func MakeStartCommand() *cobra.Command { }, } + cmd.PersistentFlags().AddFlagSet(startFlags) + return cmd } diff --git a/cli/utils.go b/cli/utils.go index 97b1f144ff..e85dd09e2c 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -114,8 +114,7 @@ func setContextDB(cmd *cobra.Command) error { // setContextConfig sets teh config for the current command context. func setContextConfig(cmd *cobra.Command) error { rootdir := mustGetContextRootDir(cmd) - flags := cmd.Root().PersistentFlags() - cfg, err := loadConfig(rootdir, flags) + cfg, err := loadConfig(rootdir) if err != nil { return err } diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md index 8c26dd86e3..3edc08b8d9 100644 --- a/docs/cli/defradb.md +++ b/docs/cli/defradb.md @@ -12,29 +12,20 @@ Start a DefraDB node, interact with a local or remote node, and much more. ### Options ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -h, --help help for defradb - --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -h, --help help for defradb + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. 
Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md index c2a8e31972..73c029153f 100644 --- a/docs/cli/defradb_client.md +++ b/docs/cli/defradb_client.md @@ -18,28 +18,19 @@ Execute queries, add schema types, obtain node info, etc. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_acp.md b/docs/cli/defradb_client_acp.md index a81ddea70c..b8d92ee2bd 100644 --- a/docs/cli/defradb_client_acp.md +++ b/docs/cli/defradb_client_acp.md @@ -19,30 +19,21 @@ Learn more about [ACP](/acp/README.md) ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_acp_policy.md b/docs/cli/defradb_client_acp_policy.md index 0ae3eb70c6..3330a7e3dc 100644 --- a/docs/cli/defradb_client_acp_policy.md +++ b/docs/cli/defradb_client_acp_policy.md @@ -15,30 +15,21 @@ Interact with the acp policy features of DefraDB instance ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_acp_policy_add.md b/docs/cli/defradb_client_acp_policy_add.md index 00a78f7b49..6b1e1865f6 100644 --- a/docs/cli/defradb_client_acp_policy_add.md +++ b/docs/cli/defradb_client_acp_policy_add.md @@ -63,30 +63,21 @@ defradb client acp policy add [-i --identity] [policy] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md index bf879df080..a75f1edd43 100644 --- a/docs/cli/defradb_client_backup.md +++ b/docs/cli/defradb_client_backup.md @@ -16,30 +16,21 @@ Currently only supports JSON format. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md index b922576e60..5a4aa314cf 100644 --- a/docs/cli/defradb_client_backup_export.md +++ b/docs/cli/defradb_client_backup_export.md @@ -30,30 +30,21 @@ defradb client backup export [-c --collections | -p --pretty | -f --format] ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_backup_import.md b/docs/cli/defradb_client_backup_import.md index df62f3575a..57326115d2 100644 --- a/docs/cli/defradb_client_backup_import.md +++ b/docs/cli/defradb_client_backup_import.md @@ -22,30 +22,21 @@ defradb client backup import [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. 
Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection.md b/docs/cli/defradb_client_collection.md index fa0eda42e8..24242d0966 100644 --- a/docs/cli/defradb_client_collection.md +++ b/docs/cli/defradb_client_collection.md @@ -21,28 +21,19 @@ Create, read, update, and delete documents within a collection. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_create.md b/docs/cli/defradb_client_collection_create.md index 165dc72b54..c44c99e6c0 100644 --- a/docs/cli/defradb_client_collection_create.md +++ b/docs/cli/defradb_client_collection_create.md @@ -36,34 +36,25 @@ defradb client collection create [-i --identity] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --get-inactive Get inactive collections as well as active - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --name string Collection name - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --schema string Collection schema Root - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) - --version string Collection version ID + --get-inactive Get inactive collections as well as active + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_delete.md b/docs/cli/defradb_client_collection_delete.md index c3b2f8cd41..93fd056f76 100644 --- a/docs/cli/defradb_client_collection_delete.md +++ b/docs/cli/defradb_client_collection_delete.md @@ -31,34 +31,25 @@ defradb client collection delete [-i --identity] [--filter --docID ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --name string Collection name - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --schema string Collection schema Root - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) - --version string Collection version ID + --get-inactive Get inactive collections as well as active + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_describe.md b/docs/cli/defradb_client_collection_describe.md index de68d77a1c..4eddd04c49 100644 --- a/docs/cli/defradb_client_collection_describe.md +++ b/docs/cli/defradb_client_collection_describe.md @@ -36,30 +36,21 @@ defradb client collection describe [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_docIDs.md b/docs/cli/defradb_client_collection_docIDs.md index b3b0a556c0..9ca929b594 100644 --- a/docs/cli/defradb_client_collection_docIDs.md +++ b/docs/cli/defradb_client_collection_docIDs.md @@ -26,34 +26,25 @@ defradb client collection docIDs [-i --identity] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --get-inactive Get inactive collections as well as active - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --name string Collection name - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --schema string Collection schema Root - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) - --version string Collection version ID + --get-inactive Get inactive collections as well as active + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_get.md b/docs/cli/defradb_client_collection_get.md index ce561ca990..1261f9103a 100644 --- a/docs/cli/defradb_client_collection_get.md +++ b/docs/cli/defradb_client_collection_get.md @@ -27,34 +27,25 @@ defradb client collection get [-i --identity] [--show-deleted] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --get-inactive Get inactive collections as well as active - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --name string Collection name - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --schema string Collection schema Root - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) - --version string Collection version ID + --get-inactive Get inactive collections as well as active + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_patch.md b/docs/cli/defradb_client_collection_patch.md index fba7cdcf0a..13596cf36b 100644 --- a/docs/cli/defradb_client_collection_patch.md +++ b/docs/cli/defradb_client_collection_patch.md @@ -33,34 +33,25 @@ defradb client collection patch [patch] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --get-inactive Get inactive collections as well as active - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --name string Collection name - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --schema string Collection schema Root - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) - --version string Collection version ID + --get-inactive Get inactive collections as well as active + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_collection_update.md b/docs/cli/defradb_client_collection_update.md index cd906be969..b23575bbe7 100644 --- a/docs/cli/defradb_client_collection_update.md +++ b/docs/cli/defradb_client_collection_update.md @@ -38,34 +38,25 @@ defradb client collection update [-i --identity] [--filter --docID ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --name string Collection name - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --schema string Collection schema Root - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1073741824) - --version string Collection version ID + --get-inactive Get inactive collections as well as active + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --name string Collection name + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") + --version string Collection version ID ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md index e608da7f10..ca831313cd 100644 --- a/docs/cli/defradb_client_dump.md +++ b/docs/cli/defradb_client_dump.md @@ -15,30 +15,21 @@ defradb client dump [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md index 5c5031d44f..1f9cfafd79 100644 --- a/docs/cli/defradb_client_index.md +++ b/docs/cli/defradb_client_index.md @@ -15,30 +15,21 @@ Manage (create, drop, or list) collection indexes on a DefraDB node. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. 
Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md index c361ccf684..331429b6f9 100644 --- a/docs/cli/defradb_client_index_create.md +++ b/docs/cli/defradb_client_index_create.md @@ -32,30 +32,21 @@ defradb client index create -c --collection --fields [-n - ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md index 68ac4701ed..40a2bf4067 100644 --- a/docs/cli/defradb_client_index_drop.md +++ b/docs/cli/defradb_client_index_drop.md @@ -24,30 +24,21 @@ defradb client index drop -c --collection -n --name [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-keyring Disable the keyring and generate ephemeral keys
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
+ -i, --identity string ACP Identity
+ --keyring-backend string Keyring backend to use. Options are file or system (default "file")
+ --keyring-namespace string Service name to use when using the system backend (default "defradb")
+ --keyring-path string Path to store encrypted keys when using the file backend (default "keys")
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
+ --no-keyring Disable the keyring and generate ephemeral keys
+ --no-log-color Disable colored log output
+ --rootdir string Directory for persistent data (default: $HOME/.defradb)
+ --tx uint Transaction ID
+ --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
```

### SEE ALSO

diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md
index bec4c6a005..dbae25798a 100644
--- a/docs/cli/defradb_client_index_list.md
+++ b/docs/cli/defradb_client_index_list.md
@@ -26,30 +26,21 @@ defradb client index list [-c --collection ] [flags]

### Options inherited from parent commands

```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --keyring-backend string Keyring backend to use. Options are file or system (default "file")
- --keyring-namespace string Service name to use when using the system backend (default "defradb")
- --keyring-path string Path to store encrypted keys when using the file backend (default "keys")
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p.md b/docs/cli/defradb_client_p2p.md index 23dae543e3..4801449245 100644 --- a/docs/cli/defradb_client_p2p.md +++ b/docs/cli/defradb_client_p2p.md @@ -15,30 +15,21 @@ Interact with the DefraDB P2P system ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection.md b/docs/cli/defradb_client_p2p_collection.md index 318c259548..0882eb93bc 100644 --- a/docs/cli/defradb_client_p2p_collection.md +++ b/docs/cli/defradb_client_p2p_collection.md @@ -16,30 +16,21 @@ The selected collections synchronize their events on the pubsub network. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection_add.md b/docs/cli/defradb_client_p2p_collection_add.md index 149b69c5a8..72258df075 100644 --- a/docs/cli/defradb_client_p2p_collection_add.md +++ b/docs/cli/defradb_client_p2p_collection_add.md @@ -27,30 +27,21 @@ defradb client p2p collection add [collectionIDs] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection_getall.md b/docs/cli/defradb_client_p2p_collection_getall.md index 1145bfac75..9e181e41d9 100644 --- a/docs/cli/defradb_client_p2p_collection_getall.md +++ b/docs/cli/defradb_client_p2p_collection_getall.md @@ -20,30 +20,21 @@ defradb client p2p collection getall [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_collection_remove.md b/docs/cli/defradb_client_p2p_collection_remove.md index bab6859137..c08ef717ed 100644 --- a/docs/cli/defradb_client_p2p_collection_remove.md +++ b/docs/cli/defradb_client_p2p_collection_remove.md @@ -27,30 +27,21 @@ defradb client p2p collection remove [collectionIDs] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_info.md b/docs/cli/defradb_client_p2p_info.md index b1ebf90922..55c55e24ab 100644 --- a/docs/cli/defradb_client_p2p_info.md +++ b/docs/cli/defradb_client_p2p_info.md @@ -19,30 +19,21 @@ defradb client p2p info [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator.md b/docs/cli/defradb_client_p2p_replicator.md index e88e16d487..cfa84ea2f3 100644 --- a/docs/cli/defradb_client_p2p_replicator.md +++ b/docs/cli/defradb_client_p2p_replicator.md @@ -16,30 +16,21 @@ A replicator replicates one or all collection(s) from one node to another. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator_delete.md b/docs/cli/defradb_client_p2p_replicator_delete.md index 07c4f4eb80..0ee748feeb 100644 --- a/docs/cli/defradb_client_p2p_replicator_delete.md +++ b/docs/cli/defradb_client_p2p_replicator_delete.md @@ -25,30 +25,21 @@ defradb client p2p replicator delete [-c, --collection] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator_getall.md b/docs/cli/defradb_client_p2p_replicator_getall.md index 39d41ac3d7..470f692bfc 100644 --- a/docs/cli/defradb_client_p2p_replicator_getall.md +++ b/docs/cli/defradb_client_p2p_replicator_getall.md @@ -24,30 +24,21 @@ defradb client p2p replicator getall [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_p2p_replicator_set.md b/docs/cli/defradb_client_p2p_replicator_set.md index 4d8cb7515a..52917a7150 100644 --- a/docs/cli/defradb_client_p2p_replicator_set.md +++ b/docs/cli/defradb_client_p2p_replicator_set.md @@ -25,30 +25,21 @@ defradb client p2p replicator set [-c, --collection] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md index f5e0035273..2dcea07526 100644 --- a/docs/cli/defradb_client_query.md +++ b/docs/cli/defradb_client_query.md @@ -37,30 +37,21 @@ defradb client query [-i --identity] [request] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md index 8f5a0896e3..c69bdaee8f 100644 --- a/docs/cli/defradb_client_schema.md +++ b/docs/cli/defradb_client_schema.md @@ -15,30 +15,21 @@ Make changes, updates, or look for existing schema types. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md index bf06821cdb..9e70bb1f17 100644 --- a/docs/cli/defradb_client_schema_add.md +++ b/docs/cli/defradb_client_schema_add.md @@ -36,30 +36,21 @@ defradb client schema add [schema] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_describe.md b/docs/cli/defradb_client_schema_describe.md index 8048411b57..06cb28479d 100644 --- a/docs/cli/defradb_client_schema_describe.md +++ b/docs/cli/defradb_client_schema_describe.md @@ -35,30 +35,21 @@ defradb client schema describe [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md index 5e815aaff0..a7d492d535 100644 --- a/docs/cli/defradb_client_schema_migration.md +++ b/docs/cli/defradb_client_schema_migration.md @@ -15,30 +15,21 @@ Make set or look for existing schema migrations on a DefraDB node. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/cli/defradb_client_schema_migration_down.md index a64c00a0f5..c457a61992 100644 --- a/docs/cli/defradb_client_schema_migration_down.md +++ b/docs/cli/defradb_client_schema_migration_down.md @@ -32,30 +32,21 @@ defradb client schema migration down --collection [fl ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_reload.md b/docs/cli/defradb_client_schema_migration_reload.md index 1ef1213f56..e4d1b77cdf 100644 --- a/docs/cli/defradb_client_schema_migration_reload.md +++ b/docs/cli/defradb_client_schema_migration_reload.md @@ -19,30 +19,21 @@ defradb client schema migration reload [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_set-registry.md b/docs/cli/defradb_client_schema_migration_set-registry.md index cd5325457d..f920a37216 100644 --- a/docs/cli/defradb_client_schema_migration_set-registry.md +++ b/docs/cli/defradb_client_schema_migration_set-registry.md @@ -25,30 +25,21 @@ defradb client schema migration set-registry [collectionID] [cfg] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md index c0353f9622..66062fe4e8 100644 --- a/docs/cli/defradb_client_schema_migration_set.md +++ b/docs/cli/defradb_client_schema_migration_set.md @@ -32,30 +32,21 @@ defradb client schema migration set [src] [dst] [cfg] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_migration_up.md b/docs/cli/defradb_client_schema_migration_up.md index 3ce2862447..5174bf5ce0 100644 --- a/docs/cli/defradb_client_schema_migration_up.md +++ b/docs/cli/defradb_client_schema_migration_up.md @@ -32,30 +32,21 @@ defradb client schema migration up --collection [flag ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md index cae05bd26b..6c884d0a0f 100644 --- a/docs/cli/defradb_client_schema_patch.md +++ b/docs/cli/defradb_client_schema_patch.md @@ -35,30 +35,21 @@ defradb client schema patch [schema] [migration] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_schema_set-active.md b/docs/cli/defradb_client_schema_set-active.md index a2e7dd82ad..5e9daf911b 100644 --- a/docs/cli/defradb_client_schema_set-active.md +++ b/docs/cli/defradb_client_schema_set-active.md @@ -20,30 +20,21 @@ defradb client schema set-active [versionID] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_tx.md b/docs/cli/defradb_client_tx.md index f7bc69f3ea..e9ffb95d3b 100644 --- a/docs/cli/defradb_client_tx.md +++ b/docs/cli/defradb_client_tx.md @@ -15,30 +15,21 @@ Create, commit, and discard DefraDB transactions ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_tx_commit.md b/docs/cli/defradb_client_tx_commit.md index 3e854427dd..d641d03691 100644 --- a/docs/cli/defradb_client_tx_commit.md +++ b/docs/cli/defradb_client_tx_commit.md @@ -19,30 +19,21 @@ defradb client tx commit [id] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_tx_create.md b/docs/cli/defradb_client_tx_create.md index a610a249a9..633c82b6e4 100644 --- a/docs/cli/defradb_client_tx_create.md +++ b/docs/cli/defradb_client_tx_create.md @@ -21,30 +21,21 @@ defradb client tx create [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_tx_discard.md b/docs/cli/defradb_client_tx_discard.md index 0f856f3855..a61bbc5fa1 100644 --- a/docs/cli/defradb_client_tx_discard.md +++ b/docs/cli/defradb_client_tx_discard.md @@ -19,30 +19,21 @@ defradb client tx discard [id] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_view.md b/docs/cli/defradb_client_view.md index c81fec50e0..15dd2034cb 100644 --- a/docs/cli/defradb_client_view.md +++ b/docs/cli/defradb_client_view.md @@ -15,30 +15,21 @@ Manage (add) views withing a running DefraDB instance ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_client_view_add.md b/docs/cli/defradb_client_view_add.md index 58c0ea4596..ee5e94f6df 100644 --- a/docs/cli/defradb_client_view_add.md +++ b/docs/cli/defradb_client_view_add.md @@ -25,30 +25,21 @@ defradb client view add [query] [sdl] [transform] [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_keyring.md b/docs/cli/defradb_keyring.md index ca26cf1e8b..e650592529 100644 --- a/docs/cli/defradb_keyring.md +++ b/docs/cli/defradb_keyring.md @@ -16,28 +16,19 @@ Generate, import, and export private keys. ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_keyring_export.md b/docs/cli/defradb_keyring_export.md index 8292ae9c1d..78bee67e7d 100644 --- a/docs/cli/defradb_keyring_export.md +++ b/docs/cli/defradb_keyring_export.md @@ -23,28 +23,19 @@ defradb keyring export [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_keyring_generate.md b/docs/cli/defradb_keyring_generate.md index e7d9a34f47..ab8ce5ad88 100644 --- a/docs/cli/defradb_keyring_generate.md +++ b/docs/cli/defradb_keyring_generate.md @@ -32,28 +32,19 @@ defradb keyring generate [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_keyring_import.md b/docs/cli/defradb_keyring_import.md index 0b93048185..3206e33440 100644 --- a/docs/cli/defradb_keyring_import.md +++ b/docs/cli/defradb_keyring_import.md @@ -23,28 +23,19 @@ defradb keyring import [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_server-dump.md b/docs/cli/defradb_server-dump.md index 5973f12642..403d72c972 100644 --- a/docs/cli/defradb_server-dump.md +++ b/docs/cli/defradb_server-dump.md @@ -15,28 +15,19 @@ defradb server-dump [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md index a9f9010a17..0f2bed427f 100644 --- a/docs/cli/defradb_start.md +++ b/docs/cli/defradb_start.md @@ -12,37 +12,37 @@ defradb start [flags] ### Options -``` - -h, --help help for start -``` - -### Options inherited from parent commands - ``` --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs + -h, --help help for start --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys --no-p2p Disable the peer-to-peer network synchronization system --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) --peers stringArray List of peers to connect to --privkeypath string Path to the private key for tls --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` +### Options inherited from parent commands + +``` + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") +``` + ### SEE ALSO * [defradb](defradb.md) - DefraDB Edge Database diff --git a/docs/cli/defradb_version.md b/docs/cli/defradb_version.md index 123441769b..c82cd43df8 100644 --- a/docs/cli/defradb_version.md +++ b/docs/cli/defradb_version.md @@ -17,28 +17,19 @@ defradb version [flags] ### Options inherited from parent commands ``` - --allowed-origins stringArray List of origins to allow for CORS requests - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-no-color Disable colored log output - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-keyring Disable the keyring and generate ephemeral keys - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` ### SEE ALSO diff --git a/docs/config.md b/docs/config.md index 316b6d9aa0..0981ad9a2e 100644 --- a/docs/config.md +++ b/docs/config.md @@ -89,7 +89,7 @@ Include source location in logs. Defaults to `false`. Logger config overrides. Format `,=,...;,...`. -## `log.nocolor` +## `log.colordisabled` Disable colored log output. Defaults to `false`. From 683f78c49a4023b349928985705f2ec806a48472 Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Wed, 15 May 2024 23:29:50 -0400 Subject: [PATCH 09/78] refactor: Change from protobuf to cbor for IPLD (#2604) ## Relevant issue(s) Resolves #2603 ## Description This PR changes the encoding of the blocks in the blockstore to use cbor instead of protobof. As part of this, we move away from encoding/decoding individual deltas (no more Marshalling and Unmarshalling deltas) and instead, encode/decode at the block level. To do this, we use the new `github.com/ipld/go-ipld-prime` package that uses IPLD schemas to map the encoded blocks to the appropriate fields and CRDT types. The new `core/block` package handles that part of the change. The `net` package is also affected by that change with the `DAGSyncer` being replaced by the new `ipld/linking.LinkSystem`. I expect further usage of the `LinkSystem` in a couple subsequent PRs. 
--- client/db.go | 9 +- client/mocks/db.go | 62 ++++- datastore/blockstore.go | 20 +- datastore/dag.go | 68 ----- datastore/mocks/dag_store.go | 45 ++++ datastore/multi.go | 2 +- datastore/store.go | 10 +- .../i2603-ipld-protobuf-to-cbor.md | 3 + events/db_update.go | 10 +- go.mod | 9 +- go.sum | 2 + http/client.go | 8 +- internal/core/block/block.go | 245 ++++++++++++++++++ internal/core/block/block_test.go | 178 +++++++++++++ internal/core/block/errors.go | 68 +++++ internal/core/clock.go | 27 -- internal/core/crdt/composite.go | 90 ++----- internal/core/crdt/counter.go | 76 +++--- internal/core/crdt/ipld_union.go | 134 ++++++++++ internal/core/crdt/lwwreg.go | 57 ++-- internal/core/crdt/lwwreg_test.go | 113 +------- internal/core/delta.go | 18 -- internal/core/node.go | 34 --- internal/core/replicated.go | 6 +- internal/db/collection.go | 29 +-- internal/db/collection_delete.go | 17 +- internal/db/db.go | 9 +- internal/db/fetcher/mocks/fetcher.go | 2 +- internal/db/fetcher/versioned.go | 55 ++-- internal/db/indexed_docs_test.go | 12 + internal/merkle/clock/clock.go | 106 ++++---- internal/merkle/clock/clock_test.go | 31 ++- internal/merkle/clock/errors.go | 4 +- internal/merkle/clock/ipld.go | 150 ----------- internal/merkle/clock/ipld_test.go | 11 - internal/merkle/crdt/composite.go | 35 +-- internal/merkle/crdt/counter.go | 11 +- internal/merkle/crdt/lwwreg.go | 11 +- internal/merkle/crdt/merklecrdt.go | 26 +- internal/merkle/crdt/merklecrdt_test.go | 6 +- internal/planner/commit.go | 86 +++--- net/client.go | 2 +- net/client_test.go | 9 +- net/dag.go | 55 ++-- net/dag_test.go | 62 +---- net/peer.go | 67 ++--- net/peer_test.go | 136 ++-------- net/process.go | 93 +++---- net/server.go | 15 +- net/server_test.go | 40 ++- tests/clients/cli/wrapper.go | 8 +- tests/clients/http/wrapper.go | 8 +- .../events/simple/with_update_test.go | 4 +- tests/integration/events/utils.go | 4 +- .../mutation/create/with_version_test.go | 2 +- 
.../integration/query/commits/simple_test.go | 48 ++-- .../query/commits/with_cid_test.go | 8 +- .../query/commits/with_depth_test.go | 34 +-- .../query/commits/with_doc_id_cid_test.go | 4 +- .../query/commits/with_doc_id_count_test.go | 6 +- .../query/commits/with_doc_id_field_test.go | 4 +- .../commits/with_doc_id_limit_offset_test.go | 4 +- .../query/commits/with_doc_id_limit_test.go | 4 +- .../with_doc_id_order_limit_offset_test.go | 4 +- .../query/commits/with_doc_id_order_test.go | 74 +++--- .../query/commits/with_doc_id_test.go | 54 ++-- .../commits/with_doc_id_typename_test.go | 6 +- .../query/commits/with_field_test.go | 6 +- .../query/commits/with_group_test.go | 16 +- .../latest_commits/with_doc_id_field_test.go | 12 +- .../query/latest_commits/with_doc_id_test.go | 12 +- .../query/one_to_many/with_cid_doc_id_test.go | 8 +- .../query/simple/with_cid_doc_id_test.go | 18 +- .../query/simple/with_version_test.go | 54 ++-- 74 files changed, 1436 insertions(+), 1340 deletions(-) delete mode 100644 datastore/dag.go create mode 100644 docs/data_format_changes/i2603-ipld-protobuf-to-cbor.md create mode 100644 internal/core/block/block.go create mode 100644 internal/core/block/block_test.go create mode 100644 internal/core/block/errors.go delete mode 100644 internal/core/clock.go create mode 100644 internal/core/crdt/ipld_union.go delete mode 100644 internal/core/node.go delete mode 100644 internal/merkle/clock/ipld.go delete mode 100644 internal/merkle/clock/ipld_test.go diff --git a/client/db.go b/client/db.go index c5cb95eb4b..6c530dd419 100644 --- a/client/db.go +++ b/client/db.go @@ -13,7 +13,7 @@ package client import ( "context" - blockstore "github.com/ipfs/boxo/blockstore" + ds "github.com/ipfs/go-datastore" "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" @@ -48,13 +48,18 @@ type DB interface { // Blockstore returns the blockstore, within which all blocks (commits) managed by DefraDB are held. 
// // It sits within the rootstore returned by [Root]. - Blockstore() blockstore.Blockstore + Blockstore() datastore.DAGStore // Peerstore returns the peerstore where known host information is stored. // // It sits within the rootstore returned by [Root]. Peerstore() datastore.DSBatching + // Headstore returns the headstore where the current heads of the database are stored. + // + // It is read-only and sits within the rootstore returned by [Root]. + Headstore() ds.Read + // Close closes the database instance and releases any resources held. // // The behaviour of other functions in this package after this function has been called is undefined diff --git a/client/mocks/db.go b/client/mocks/db.go index 20b5988fe7..4f0320f0c4 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -3,15 +3,16 @@ package mocks import ( - blockstore "github.com/ipfs/boxo/blockstore" - client "github.com/sourcenetwork/defradb/client" - context "context" + client "github.com/sourcenetwork/defradb/client" + datastore "github.com/sourcenetwork/defradb/datastore" events "github.com/sourcenetwork/defradb/events" + go_datastore "github.com/ipfs/go-datastore" + immutable "github.com/sourcenetwork/immutable" mock "github.com/stretchr/testify/mock" @@ -284,15 +285,15 @@ func (_c *DB_BasicImport_Call) RunAndReturn(run func(context.Context, string) er } // Blockstore provides a mock function with given fields: -func (_m *DB) Blockstore() blockstore.Blockstore { +func (_m *DB) Blockstore() datastore.DAGStore { ret := _m.Called() - var r0 blockstore.Blockstore - if rf, ok := ret.Get(0).(func() blockstore.Blockstore); ok { + var r0 datastore.DAGStore + if rf, ok := ret.Get(0).(func() datastore.DAGStore); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(blockstore.Blockstore) + r0 = ret.Get(0).(datastore.DAGStore) } } @@ -316,12 +317,12 @@ func (_c *DB_Blockstore_Call) Run(run func()) *DB_Blockstore_Call { return _c } -func (_c *DB_Blockstore_Call) Return(_a0 
blockstore.Blockstore) *DB_Blockstore_Call { +func (_c *DB_Blockstore_Call) Return(_a0 datastore.DAGStore) *DB_Blockstore_Call { _c.Call.Return(_a0) return _c } -func (_c *DB_Blockstore_Call) RunAndReturn(run func() blockstore.Blockstore) *DB_Blockstore_Call { +func (_c *DB_Blockstore_Call) RunAndReturn(run func() datastore.DAGStore) *DB_Blockstore_Call { _c.Call.Return(run) return _c } @@ -716,6 +717,49 @@ func (_c *DB_GetSchemas_Call) RunAndReturn(run func(context.Context, client.Sche return _c } +// Headstore provides a mock function with given fields: +func (_m *DB) Headstore() go_datastore.Read { + ret := _m.Called() + + var r0 go_datastore.Read + if rf, ok := ret.Get(0).(func() go_datastore.Read); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(go_datastore.Read) + } + } + + return r0 +} + +// DB_Headstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Headstore' +type DB_Headstore_Call struct { + *mock.Call +} + +// Headstore is a helper method to define mock.On call +func (_e *DB_Expecter) Headstore() *DB_Headstore_Call { + return &DB_Headstore_Call{Call: _e.mock.On("Headstore")} +} + +func (_c *DB_Headstore_Call) Run(run func()) *DB_Headstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_Headstore_Call) Return(_a0 go_datastore.Read) *DB_Headstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_Headstore_Call) RunAndReturn(run func() go_datastore.Read) *DB_Headstore_Call { + _c.Call.Return(run) + return _c +} + // LensRegistry provides a mock function with given fields: func (_m *DB) LensRegistry() client.LensRegistry { ret := _m.Called() diff --git a/datastore/blockstore.go b/datastore/blockstore.go index be25894a3d..f9f92198cd 100644 --- a/datastore/blockstore.go +++ b/datastore/blockstore.go @@ -13,13 +13,14 @@ package datastore import ( "context" - blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/blockstore" 
dshelp "github.com/ipfs/boxo/datastore/dshelp" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipld/go-ipld-prime/storage/bsadapter" "github.com/sourcenetwork/defradb/errors" ) @@ -44,18 +45,33 @@ import ( // NewBlockstore returns a default Blockstore implementation // using the provided datastore.Batching backend. -func NewBlockstore(store DSReaderWriter) blockstore.Blockstore { +func newBlockstore(store DSReaderWriter) *bstore { return &bstore{ store: store, } } +func newIPLDStore(store blockstore.Blockstore) *bsadapter.Adapter { + return &bsadapter.Adapter{Wrapped: store} +} + type bstore struct { store DSReaderWriter rehash bool } +var _ blockstore.Blockstore = (*bstore)(nil) +var _ DAGStore = (*bstore)(nil) + +// AsIPLDStorage returns an IPLDStorage instance. +// +// It wraps the blockstore in an IPLD Blockstore adapter for use with +// the IPLD LinkSystem. +func (bs *bstore) AsIPLDStorage() IPLDStorage { + return newIPLDStore(bs) +} + // HashOnRead enables or disables rehashing of blocks on read. func (bs *bstore) HashOnRead(enabled bool) { bs.rehash = enabled diff --git a/datastore/dag.go b/datastore/dag.go deleted file mode 100644 index cd2b48eb54..0000000000 --- a/datastore/dag.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package datastore - -import ( - blockstore "github.com/ipfs/boxo/blockstore" -) - -// DAGStore is the interface to the underlying BlockStore and BlockService. 
-type dagStore struct { - blockstore.Blockstore // become a Blockstore - store DSReaderWriter - // bstore blockstore.Blockstore - // bserv blockservice.BlockService -} - -// NewDAGStore creates a new DAGStore with the supplied Batching datastore. -func NewDAGStore(store DSReaderWriter) DAGStore { - dstore := &dagStore{ - Blockstore: NewBlockstore(store), - store: store, - } - - return dstore -} - -// func (d *dagStore) setupBlockstore() error { -// bs := blockstore.NewBlockstore(d.store) -// // bs = blockstore.NewIdStore(bs) -// // cachedbs, err := blockstore.CachedBlockstore(d.ctx, bs, blockstore.DefaultCacheOpts()) -// // if err != nil { -// // return err -// // } -// d.bstore = bs -// return nil -// } - -// func (d *dagStore) setupBlockService() error { -// // if d.cfg.Offline { -// // d.bserv = blockservice.New(d.bstore, offline.Exchange(p.bstore)) -// // return nil -// // } - -// // bswapnet := network.NewFromIpfsHost(p.host, p.dht) -// // bswap := bitswap.New(p.ctx, bswapnet, p.bstore) -// // p.bserv = blockservice.New(p.bstore, bswap) - -// // @todo Investigate if we need an Exchanger or if it can stay as nil -// d.bserv = blockservice.New(d.bstore, offline.Exchange(d.bstore)) -// return nil -// } - -// func (d *dagStore) setupDAGService() error { -// d.DAGService = dag.NewDAGService(d.bserv) -// return nil -// } - -// func (d *dagStore) Blockstore() blockstore.Blockstore { -// return d.bstore -// } diff --git a/datastore/mocks/dag_store.go b/datastore/mocks/dag_store.go index 8408013ccc..a9ba9e2af8 100644 --- a/datastore/mocks/dag_store.go +++ b/datastore/mocks/dag_store.go @@ -8,6 +8,8 @@ import ( context "context" + datastore "github.com/sourcenetwork/defradb/datastore" + mock "github.com/stretchr/testify/mock" ) @@ -78,6 +80,49 @@ func (_c *DAGStore_AllKeysChan_Call) RunAndReturn(run func(context.Context) (<-c return _c } +// AsIPLDStorage provides a mock function with given fields: +func (_m *DAGStore) AsIPLDStorage() datastore.IPLDStorage { + ret := 
_m.Called() + + var r0 datastore.IPLDStorage + if rf, ok := ret.Get(0).(func() datastore.IPLDStorage); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.IPLDStorage) + } + } + + return r0 +} + +// DAGStore_AsIPLDStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsIPLDStorage' +type DAGStore_AsIPLDStorage_Call struct { + *mock.Call +} + +// AsIPLDStorage is a helper method to define mock.On call +func (_e *DAGStore_Expecter) AsIPLDStorage() *DAGStore_AsIPLDStorage_Call { + return &DAGStore_AsIPLDStorage_Call{Call: _e.mock.On("AsIPLDStorage")} +} + +func (_c *DAGStore_AsIPLDStorage_Call) Run(run func()) *DAGStore_AsIPLDStorage_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DAGStore_AsIPLDStorage_Call) Return(_a0 datastore.IPLDStorage) *DAGStore_AsIPLDStorage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DAGStore_AsIPLDStorage_Call) RunAndReturn(run func() datastore.IPLDStorage) *DAGStore_AsIPLDStorage_Call { + _c.Call.Return(run) + return _c +} + // DeleteBlock provides a mock function with given fields: _a0, _a1 func (_m *DAGStore) DeleteBlock(_a0 context.Context, _a1 cid.Cid) error { ret := _m.Called(_a0, _a1) diff --git a/datastore/multi.go b/datastore/multi.go index 47015e4581..bbd333ba19 100644 --- a/datastore/multi.go +++ b/datastore/multi.go @@ -46,7 +46,7 @@ func MultiStoreFrom(rootstore ds.Datastore) MultiStore { head: prefix(rootRW, headStoreKey), peer: namespace.Wrap(rootstore, peerStoreKey), system: prefix(rootRW, systemStoreKey), - dag: NewDAGStore(prefix(rootRW, blockStoreKey)), + dag: newBlockstore(prefix(rootRW, blockStoreKey)), } return ms diff --git a/datastore/store.go b/datastore/store.go index 7f2764a65d..7954eb5014 100644 --- a/datastore/store.go +++ b/datastore/store.go @@ -11,8 +11,9 @@ package datastore import ( - blockstore "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/blockstore" ds 
"github.com/ipfs/go-datastore" + "github.com/ipld/go-ipld-prime/storage" "github.com/sourcenetwork/corelog" @@ -72,6 +73,13 @@ type DSReaderWriter interface { // DAGStore proxies the ipld.DAGService under the /core namespace for future-proofing type DAGStore interface { blockstore.Blockstore + AsIPLDStorage() IPLDStorage +} + +// IPLDStorage provides the methods needed for an IPLD LinkSystem. +type IPLDStorage interface { + storage.ReadableStorage + storage.WritableStorage } // DSBatching wraps the Batching interface from go-datastore diff --git a/docs/data_format_changes/i2603-ipld-protobuf-to-cbor.md b/docs/data_format_changes/i2603-ipld-protobuf-to-cbor.md new file mode 100644 index 0000000000..574762a1a7 --- /dev/null +++ b/docs/data_format_changes/i2603-ipld-protobuf-to-cbor.md @@ -0,0 +1,3 @@ +# Change encoding from protobuf to cbor and use the new IPLD schema + +The DAG blocks are now encoded using CBOR instead of protobuf and we use the new `github.com/ipld/go-ipld-prime` package to handle block encoding and decoding. It makes use of the new IPLD schema to define the block structure. \ No newline at end of file diff --git a/events/db_update.go b/events/db_update.go index 1d802d3e3a..2b93752573 100644 --- a/events/db_update.go +++ b/events/db_update.go @@ -12,7 +12,7 @@ package events import ( "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" + "github.com/sourcenetwork/immutable" ) @@ -36,10 +36,10 @@ type Update struct { // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. SchemaRoot string - // Block is the contents of this composite commit, it contains the Cids of the field level commits that + // Block is the encoded contents of this composite commit, it contains the Cids of the field level commits that // also formed this update. - Block ipld.Node + Block []byte - // Priority is used to determine the order in which concurrent updates are applied. 
- Priority uint64 + // IsCreate is true if this update is the creation of a new document. + IsCreate bool } diff --git a/go.mod b/go.mod index 9b725e6d61..caab40a658 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,8 @@ require ( github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 + github.com/ipld/go-ipld-prime v0.21.0 + github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20240322071758-198d7dba8fb8 github.com/jbenet/goprocess v0.1.4 github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c github.com/lestrrat-go/jwx/v2 v2.0.21 @@ -30,6 +32,7 @@ require ( github.com/libp2p/go-libp2p-record v0.2.0 github.com/multiformats/go-multiaddr v0.12.3 github.com/multiformats/go-multibase v0.2.0 + github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 github.com/sourcenetwork/corelog v0.0.7 @@ -42,7 +45,6 @@ require ( github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.9.0 github.com/tidwall/btree v1.7.0 - github.com/ugorji/go/codec v1.2.12 github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 github.com/zalando/go-keyring v0.2.4 @@ -183,13 +185,10 @@ require ( github.com/ipfs/go-ipfs-delay v0.0.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect - github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect github.com/ipfs/kubo v0.25.0 // indirect - github.com/ipld/go-codec-dagpb v1.6.0 // indirect - github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect @@ -238,7 +237,6 @@ require ( github.com/multiformats/go-base36 v0.2.0 // indirect 
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect @@ -285,6 +283,7 @@ require ( github.com/tendermint/tm-db v0.6.7 // indirect github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect diff --git a/go.sum b/go.sum index cbf4919926..190875055a 100644 --- a/go.sum +++ b/go.sum @@ -634,6 +634,8 @@ github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6 github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20240322071758-198d7dba8fb8 h1:WQVfplCGOHtFNyZH7eOaEqGsbbje3NP8EFeGggUvEQs= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20240322071758-198d7dba8fb8/go.mod h1:PVDd/V/Zz9IW+Diz9LEhD+ZYS9pKzawmtVQhVd0hcgQ= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= diff --git a/http/client.go b/http/client.go index 4eaadfd2d0..7616653f71 100644 --- a/http/client.go +++ b/http/client.go @@ -20,7 +20,7 @@ import 
( "strconv" "strings" - blockstore "github.com/ipfs/boxo/blockstore" + ds "github.com/ipfs/go-datastore" "github.com/lens-vm/lens/host-go/config/model" sse "github.com/vito/go-sse/sse" @@ -437,7 +437,7 @@ func (c *Client) Root() datastore.RootStore { panic("client side database") } -func (c *Client) Blockstore() blockstore.Blockstore { +func (c *Client) Blockstore() datastore.DAGStore { panic("client side database") } @@ -445,6 +445,10 @@ func (c *Client) Peerstore() datastore.DSBatching { panic("client side database") } +func (c *Client) Headstore() ds.Read { + panic("client side database") +} + func (c *Client) Events() events.Events { panic("client side database") } diff --git a/internal/core/block/block.go b/internal/core/block/block.go new file mode 100644 index 0000000000..6be17908be --- /dev/null +++ b/internal/core/block/block.go @@ -0,0 +1,245 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package coreblock + +import ( + "bytes" + "sort" + "strings" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/ipld/go-ipld-prime/schema" + "github.com/multiformats/go-multicodec" + + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/core/crdt" +) + +// Schema is the IPLD schema type that represents a `Block`. 
+var ( + Schema schema.Type + SchemaPrototype schema.TypedPrototype +) + +func init() { + Schema, SchemaPrototype = mustSetSchema( + &Block{}, + &DAGLink{}, + &crdt.CRDT{}, + &crdt.LWWRegDelta{}, + &crdt.CompositeDAGDelta{}, + &crdt.CounterDelta[int64]{}, // Only need to call one of the CounterDelta types. + ) +} + +type schemaDefinition interface { + // IPLDSchemaBytes returns the IPLD schema representation for the type. + IPLDSchemaBytes() []byte +} + +func mustSetSchema(schemas ...schemaDefinition) (schema.Type, schema.TypedPrototype) { + schemaBytes := make([][]byte, 0, len(schemas)) + for _, s := range schemas { + schemaBytes = append(schemaBytes, s.IPLDSchemaBytes()) + } + + ts, err := ipld.LoadSchemaBytes(bytes.Join(schemaBytes, nil)) + if err != nil { + panic(err) + } + blockSchemaType := ts.TypeByName("Block") + + // Calling bindnode.Prototype here ensure that [Block] and all the types it contains + // are compatible with the IPLD schema defined by blockSchemaType. + // If [Block] and `blockSchematype` do not match, this will panic. + proto := bindnode.Prototype(&Block{}, blockSchemaType) + + return blockSchemaType, proto +} + +// DAGLink represents a link to another object in a DAG. +type DAGLink struct { + // Name is the name of the link. + // + // This will be either the field name of the CRDT delta or "_head" for the head link. + Name string + // Link is the CID link to the object. + cidlink.Link +} + +// IPLDSchemaBytes returns the IPLD schema representation for the DAGLink. +// +// This needs to match the [DAGLink] struct or [mustSetSchema] will panic on init. +func (l DAGLink) IPLDSchemaBytes() []byte { + return []byte(` + type DAGLink struct { + name String + link Link + }`) +} + +func NewDAGLink(name string, link cidlink.Link) DAGLink { + return DAGLink{ + Name: name, + Link: link, + } +} + +// Block is a block that contains a CRDT delta and links to other blocks. +type Block struct { + // Delta is the CRDT delta that is stored in the block. 
+ Delta crdt.CRDT + // Links are the links to other blocks in the DAG. + Links []DAGLink +} + +// IPLDSchemaBytes returns the IPLD schema representation for the block. +// +// This needs to match the [Block] struct or [mustSetSchema] will panic on init. +func (b Block) IPLDSchemaBytes() []byte { + return []byte(` + type Block struct { + delta CRDT + links [ DAGLink ] + }`) +} + +// New creates a new block with the given delta and links. +func New(delta core.Delta, links []DAGLink, heads ...cid.Cid) *Block { + blockLinks := make([]DAGLink, 0, len(links)+len(heads)) + + // Sort the heads lexicographically by CID. + // We need to do this to ensure that the block is deterministic. + sort.Slice(heads, func(i, j int) bool { + return strings.Compare(heads[i].String(), heads[j].String()) < 0 + }) + for _, head := range heads { + blockLinks = append( + blockLinks, + DAGLink{ + Name: core.HEAD, + Link: cidlink.Link{Cid: head}, + }, + ) + } + + // Sort the links lexicographically by CID. + // We need to do this to ensure that the block is deterministic. + sort.Slice(links, func(i, j int) bool { + return strings.Compare(links[i].Cid.String(), links[j].Cid.String()) < 0 + }) + + blockLinks = append(blockLinks, links...) + + var crdtDelta crdt.CRDT + switch delta := delta.(type) { + case *crdt.LWWRegDelta: + crdtDelta = crdt.CRDT{LWWRegDelta: delta} + case *crdt.CompositeDAGDelta: + crdtDelta = crdt.CRDT{CompositeDAGDelta: delta} + case *crdt.CounterDelta[int64]: + crdtDelta = crdt.CRDT{CounterDeltaInt: delta} + case *crdt.CounterDelta[float64]: + crdtDelta = crdt.CRDT{CounterDeltaFloat: delta} + } + + return &Block{ + Links: blockLinks, + Delta: crdtDelta, + } +} + +// GetFromBytes returns a block from encoded bytes. +func GetFromBytes(b []byte) (*Block, error) { + block := &Block{} + err := block.Unmarshal(b) + if err != nil { + return nil, err + } + return block, nil +} + +// GetFromNode returns a block from a node. 
+func GetFromNode(node ipld.Node) (*Block, error) { + block, ok := bindnode.Unwrap(node).(*Block) + if !ok { + return nil, NewErrNodeToBlock(node) + } + return block, nil +} + +// Marshal encodes the delta using CBOR encoding. +func (block *Block) Marshal() (data []byte, err error) { + b, err := ipld.Marshal(dagcbor.Encode, block, Schema) + if err != nil { + return nil, NewErrEncodingBlock(err) + } + return b, nil +} + +// Unmarshal decodes the delta from CBOR encoding. +func (block *Block) Unmarshal(b []byte) error { + _, err := ipld.Unmarshal( + b, + dagcbor.Decode, + block, + Schema, + ) + if err != nil { + return NewErrUnmarshallingBlock(err) + } + return nil +} + +// GenerateNode generates an IPLD node from the block. +func (block *Block) GenerateNode() (node ipld.Node) { + return bindnode.Wrap(block, Schema) +} + +// GetLinkByName returns the link by name. It will return false if the link does not exist. +func (block *Block) GetLinkByName(name string) (cidlink.Link, bool) { + for _, link := range block.Links { + if link.Name == name { + return link.Link, true + } + } + return cidlink.Link{}, false +} + +// GenerateLink generates a cid link for the block. +func (block *Block) GenerateLink() (cidlink.Link, error) { + node := bindnode.Wrap(block, Schema) + return GetLinkFromNode(node) +} + +// GetLinkFromNode returns the cid link from the node. +func GetLinkFromNode(node ipld.Node) (cidlink.Link, error) { + lsys := cidlink.DefaultLinkSystem() + link, err := lsys.ComputeLink(GetLinkPrototype(), node) + if err != nil { + return cidlink.Link{}, NewErrGeneratingLink(err) + } + return link.(cidlink.Link), nil +} + +// GetLinkPrototype returns the link prototype for the block. 
+func GetLinkPrototype() cidlink.LinkPrototype { + return cidlink.LinkPrototype{Prefix: cid.Prefix{ + Version: uint64(multicodec.Cidv1), + Codec: uint64(multicodec.DagCbor), + MhType: uint64(multicodec.Sha2_256), + MhLength: 32, + }} +} diff --git a/internal/core/block/block_test.go b/internal/core/block/block_test.go new file mode 100644 index 0000000000..75a6ce1780 --- /dev/null +++ b/internal/core/block/block_test.go @@ -0,0 +1,178 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package coreblock + +import ( + "testing" + + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/storage/memstore" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/core/crdt" +) + +func generateBlocks(lsys *linking.LinkSystem) (cidlink.Link, error) { + // Generate new Block and save to lsys + fieldBlock := Block{ + Delta: crdt.CRDT{ + LWWRegDelta: &crdt.LWWRegDelta{ + DocID: []byte("docID"), + FieldName: "name", + Priority: 1, + SchemaVersionID: "schemaVersionID", + Data: []byte("John"), + }, + }, + } + fieldBlockLink, err := lsys.Store(ipld.LinkContext{}, GetLinkPrototype(), fieldBlock.GenerateNode()) + if err != nil { + return cidlink.Link{}, err + } + + compositeBlock := Block{ + Delta: crdt.CRDT{ + CompositeDAGDelta: &crdt.CompositeDAGDelta{ + DocID: []byte("docID"), + FieldName: "C", + Priority: 1, + SchemaVersionID: "schemaVersionID", + Status: 1, + }, + }, + Links: []DAGLink{ + { + Name: "name", 
+ Link: fieldBlockLink.(cidlink.Link), + }, + }, + } + compositeBlockLink, err := lsys.Store(ipld.LinkContext{}, GetLinkPrototype(), compositeBlock.GenerateNode()) + if err != nil { + return cidlink.Link{}, err + } + + fieldUpdateBlock := Block{ + Delta: crdt.CRDT{ + LWWRegDelta: &crdt.LWWRegDelta{ + DocID: []byte("docID"), + FieldName: "name", + Priority: 2, + SchemaVersionID: "schemaVersionID", + Data: []byte("Johny"), + }, + }, + Links: []DAGLink{ + { + Name: core.HEAD, + Link: fieldBlockLink.(cidlink.Link), + }, + }, + } + fieldUpdateBlockLink, err := lsys.Store(ipld.LinkContext{}, GetLinkPrototype(), fieldUpdateBlock.GenerateNode()) + if err != nil { + return cidlink.Link{}, err + } + + compositeUpdateBlock := Block{ + Delta: crdt.CRDT{ + CompositeDAGDelta: &crdt.CompositeDAGDelta{ + DocID: []byte("docID"), + FieldName: "C", + Priority: 2, + SchemaVersionID: "schemaVersionID", + Status: 1, + }, + }, + Links: []DAGLink{ + { + Name: core.HEAD, + Link: compositeBlockLink.(cidlink.Link), + }, + { + Name: "name", + Link: fieldUpdateBlockLink.(cidlink.Link), + }, + }, + } + compositeUpdateBlockLink, err := lsys.Store(ipld.LinkContext{}, GetLinkPrototype(), compositeUpdateBlock.GenerateNode()) + if err != nil { + return cidlink.Link{}, err + } + + return compositeUpdateBlockLink.(cidlink.Link), nil +} + +func TestBlock(t *testing.T) { + lsys := cidlink.DefaultLinkSystem() + store := memstore.Store{} + lsys.SetReadStorage(&store) + lsys.SetWriteStorage(&store) + + rootLink, err := generateBlocks(&lsys) + require.NoError(t, err) + + proto := SchemaPrototype.Representation() + nd, err := lsys.Load(ipld.LinkContext{}, rootLink, proto) + require.NoError(t, err) + + block, err := GetFromNode(nd) + require.NoError(t, err) + + b, err := block.Marshal() + require.NoError(t, err) + + newBlock, err := GetFromBytes(b) + require.NoError(t, err) + + require.Equal(t, block, newBlock) + + newNode := block.GenerateNode() + require.Equal(t, nd, newNode) + + link, err := 
block.GenerateLink() + require.NoError(t, err) + require.Equal(t, rootLink, link) + + newLink, err := GetLinkFromNode(newNode) + require.NoError(t, err) + require.Equal(t, rootLink, newLink) +} + +func TestGetFromNode_WithInvalidType_ShouldFail(t *testing.T) { + _, err := GetFromNode(basicnode.NewString("test")) + require.ErrorIs(t, err, ErrNodeToBlock) +} + +func TestBlockDeltaPriority(t *testing.T) { + lsys := cidlink.DefaultLinkSystem() + store := memstore.Store{} + lsys.SetReadStorage(&store) + lsys.SetWriteStorage(&store) + + rootLink, err := generateBlocks(&lsys) + require.NoError(t, err) + + proto := SchemaPrototype.Representation() + nd, err := lsys.Load(ipld.LinkContext{}, rootLink, proto) + require.NoError(t, err) + + block, err := GetFromNode(nd) + require.NoError(t, err) + + // The generateBlocks function creates a block with one update + // which results in a priority of 2. + require.Equal(t, uint64(2), block.Delta.GetPriority()) +} diff --git a/internal/core/block/errors.go b/internal/core/block/errors.go new file mode 100644 index 0000000000..9b6b0e8a95 --- /dev/null +++ b/internal/core/block/errors.go @@ -0,0 +1,68 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package coreblock + +import ( + "fmt" + + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errNodeToBlock string = "failed to convert node to block" + errEncodingBlock string = "failed to encode block" + errUnmarshallingBlock string = "failed to unmarshal block" + errGeneratingLink string = "failed to generate link" +) + +// Errors returnable from this package. +// +// This list is incomplete and undefined errors may also be returned. 
+// Errors returned from this package may be tested against these errors with errors.Is. +var ( + ErrNodeToBlock = errors.New(errNodeToBlock) + ErrEncodingBlock = errors.New(errEncodingBlock) + ErrUnmarshallingBlock = errors.New(errUnmarshallingBlock) + ErrGeneratingLink = errors.New(errGeneratingLink) +) + +// NewErrFailedToGetPriority returns an error indicating that the priority could not be retrieved. +func NewErrNodeToBlock(node any) error { + return errors.New( + errNodeToBlock, + errors.NewKV("Expected", fmt.Sprintf("%T", &Block{})), + errors.NewKV("Actual", fmt.Sprintf("%T", node)), + ) +} + +// NewErrEncodingBlock returns an error indicating that the block could not be encoded. +func NewErrEncodingBlock(err error) error { + return errors.Wrap( + errEncodingBlock, + err, + ) +} + +// NewErrUnmarshallingBlock returns an error indicating that the block could not be unmarshalled. +func NewErrUnmarshallingBlock(err error) error { + return errors.Wrap( + errUnmarshallingBlock, + err, + ) +} + +// NewErrGeneratingLink returns an error indicating that the link could not be generated. +func NewErrGeneratingLink(err error) error { + return errors.Wrap( + errGeneratingLink, + err, + ) +} diff --git a/internal/core/clock.go b/internal/core/clock.go deleted file mode 100644 index e7b8c7f1f2..0000000000 --- a/internal/core/clock.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package core - -import ( - "context" - - ipld "github.com/ipfs/go-ipld-format" -) - -// MerkleClock is the core logical clock implementation that manages writing to and from -// the MerkleDAG structure, ensuring a causal ordering of events. -type MerkleClock interface { - AddDAGNode( - ctx context.Context, - delta Delta, - ) (ipld.Node, error) // possibly change to AddDeltaNode? - ProcessNode(context.Context, Delta, ipld.Node) error -} diff --git a/internal/core/crdt/composite.go b/internal/core/crdt/composite.go index be762b1596..a6b7299a60 100644 --- a/internal/core/crdt/composite.go +++ b/internal/core/crdt/composite.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2024 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -13,14 +13,9 @@ package crdt import ( "bytes" "context" - "sort" - "strings" - dag "github.com/ipfs/boxo/ipld/merkledag" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ugorji/go/codec" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" @@ -41,13 +36,23 @@ type CompositeDAGDelta struct { // Status represents the status of the document. By default it is `Active`. // Alternatively, if can be set to `Deleted`. Status client.DocumentStatus - // SubDAGS should not be marshalled as they are already - // stored as links in the DAG blocks. They are needed here to - // hold on to them for the block creation. - SubDAGs []core.DAGLink `json:"-"` } -var _ core.CompositeDelta = (*CompositeDAGDelta)(nil) +var _ core.Delta = (*CompositeDAGDelta)(nil) + +// IPLDSchemaBytes returns the IPLD schema representation for the type. +// +// This needs to match the [CompositeDAGDelta] struct or [coreblock.mustSetSchema] will panic on init. 
+func (delta *CompositeDAGDelta) IPLDSchemaBytes() []byte { + return []byte(` + type CompositeDAGDelta struct { + docID Bytes + fieldName String + priority Int + schemaVersionID String + status Int + }`) +} // GetPriority gets the current priority for this delta. func (delta *CompositeDAGDelta) GetPriority() uint64 { @@ -59,30 +64,6 @@ func (delta *CompositeDAGDelta) SetPriority(prio uint64) { delta.Priority = prio } -// Marshal will serialize this delta to a byte array. -func (delta *CompositeDAGDelta) Marshal() ([]byte, error) { - h := &codec.CborHandle{} - buf := bytes.NewBuffer(nil) - enc := codec.NewEncoder(buf, h) - err := enc.Encode(delta) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal decodes the delta from CBOR. -func (delta *CompositeDAGDelta) Unmarshal(b []byte) error { - h := &codec.CborHandle{} - dec := codec.NewDecoderBytes(b, h) - return dec.Decode(delta) -} - -// Links returns the links for this delta. -func (delta *CompositeDAGDelta) Links() []core.DAGLink { - return delta.SubDAGs -} - // CompositeDAG is a CRDT structure that is used to track a collection of sub MerkleCRDTs. type CompositeDAG struct { baseCRDT @@ -104,17 +85,13 @@ func (c CompositeDAG) Value(ctx context.Context) ([]byte, error) { return nil, nil } -// Set applies a delta to the composite DAG CRDT. TBD -func (c CompositeDAG) Set(links []core.DAGLink) *CompositeDAGDelta { - // make sure the links are sorted lexicographically by CID - sort.Slice(links, func(i, j int) bool { - return strings.Compare(links[i].Cid.String(), links[j].Cid.String()) < 0 - }) +// Set returns a new composite DAG delta CRDT with the given status. 
+func (c CompositeDAG) Set(status client.DocumentStatus) *CompositeDAGDelta { return &CompositeDAGDelta{ DocID: []byte(c.key.DocID), FieldName: c.fieldName, SchemaVersionID: c.schemaVersionKey.SchemaVersionId, - SubDAGs: links, + Status: status, } } @@ -198,32 +175,3 @@ func (c CompositeDAG) deleteWithPrefix(ctx context.Context, key core.DataStoreKe return nil } - -// DeltaDecode is a typed helper to extract. -// a CompositeDAGDelta from a ipld.Node -// for now let's do cbor (quick to implement) -func (c CompositeDAG) DeltaDecode(node ipld.Node) (core.Delta, error) { - pbNode, ok := node.(*dag.ProtoNode) - if !ok { - return nil, client.NewErrUnexpectedType[*dag.ProtoNode]("ipld.Node", node) - } - - delta := &CompositeDAGDelta{} - err := delta.Unmarshal(pbNode.Data()) - if err != nil { - return nil, err - } - - // get links - for _, link := range pbNode.Links() { - if link.Name == "head" { // ignore the head links - continue - } - - delta.SubDAGs = append(delta.SubDAGs, core.DAGLink{ - Name: link.Name, - Cid: link.Cid, - }) - } - return delta, nil -} diff --git a/internal/core/crdt/counter.go b/internal/core/crdt/counter.go index 51292d064c..6c4e002223 100644 --- a/internal/core/crdt/counter.go +++ b/internal/core/crdt/counter.go @@ -18,10 +18,7 @@ import ( "math/big" "github.com/fxamacker/cbor/v2" - dag "github.com/ipfs/boxo/ipld/merkledag" ds "github.com/ipfs/go-datastore" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ugorji/go/codec" "golang.org/x/exp/constraints" "github.com/sourcenetwork/defradb/client" @@ -31,14 +28,6 @@ import ( "github.com/sourcenetwork/defradb/internal/db/base" ) -var ( - // ensure types implements core interfaces - _ core.ReplicatedData = (*Counter[float64])(nil) - _ core.ReplicatedData = (*Counter[int64])(nil) - _ core.Delta = (*CounterDelta[float64])(nil) - _ core.Delta = (*CounterDelta[int64])(nil) -) - type Incrementable interface { constraints.Integer | constraints.Float } @@ -58,6 +47,33 @@ type CounterDelta[T Incrementable] 
struct { Data T } +var _ core.Delta = (*CounterDelta[float64])(nil) +var _ core.Delta = (*CounterDelta[int64])(nil) + +// IPLDSchemaBytes returns the IPLD schema representation for the type. +// +// This needs to match the [CounterDelta[T]] struct or [coreblock.mustSetSchema] will panic on init. +func (delta *CounterDelta[T]) IPLDSchemaBytes() []byte { + return []byte(` + type CounterDeltaFloat struct { + docID Bytes + fieldName String + priority Int + nonce Int + schemaVersionID String + data Float + } + + type CounterDeltaInt struct { + docID Bytes + fieldName String + priority Int + nonce Int + schemaVersionID String + data Int + }`) +} + // GetPriority gets the current priority for this delta. func (delta *CounterDelta[T]) GetPriority() uint64 { return delta.Priority @@ -68,25 +84,6 @@ func (delta *CounterDelta[T]) SetPriority(prio uint64) { delta.Priority = prio } -// Marshal encodes the delta using CBOR. -func (delta *CounterDelta[T]) Marshal() ([]byte, error) { - h := &codec.CborHandle{} - buf := bytes.NewBuffer(nil) - enc := codec.NewEncoder(buf, h) - err := enc.Encode(delta) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal decodes the delta from CBOR. -func (delta *CounterDelta[T]) Unmarshal(b []byte) error { - h := &codec.CborHandle{} - dec := codec.NewDecoderBytes(b, h) - return dec.Decode(delta) -} - // Counter, is a simple CRDT type that allows increment/decrement // of an Int and Float data types that ensures convergence. type Counter[T Incrementable] struct { @@ -94,6 +91,9 @@ type Counter[T Incrementable] struct { AllowDecrement bool } +var _ core.ReplicatedData = (*Counter[float64])(nil) +var _ core.ReplicatedData = (*Counter[int64])(nil) + // NewCounter returns a new instance of the Counter with the given ID. 
func NewCounter[T Incrementable]( store datastore.DSReaderWriter, @@ -201,22 +201,6 @@ func (c Counter[T]) getCurrentValue(ctx context.Context, key core.DataStoreKey) return getNumericFromBytes[T](curValue) } -// DeltaDecode is a typed helper to extract a CounterDelta from a ipld.Node -func (c Counter[T]) DeltaDecode(node ipld.Node) (core.Delta, error) { - pbNode, ok := node.(*dag.ProtoNode) - if !ok { - return nil, client.NewErrUnexpectedType[*dag.ProtoNode]("ipld.Node", node) - } - - delta := &CounterDelta[T]{} - err := delta.Unmarshal(pbNode.Data()) - if err != nil { - return nil, err - } - - return delta, nil -} - func (c Counter[T]) CType() client.CType { if c.AllowDecrement { return client.PN_COUNTER diff --git a/internal/core/crdt/ipld_union.go b/internal/core/crdt/ipld_union.go new file mode 100644 index 0000000000..5d4cfc9f9e --- /dev/null +++ b/internal/core/crdt/ipld_union.go @@ -0,0 +1,134 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package crdt + +import "github.com/sourcenetwork/defradb/internal/core" + +// CRDT is a union type used for IPLD schemas that can hold any of the CRDT deltas. +type CRDT struct { + LWWRegDelta *LWWRegDelta + CompositeDAGDelta *CompositeDAGDelta + CounterDeltaInt *CounterDelta[int64] + CounterDeltaFloat *CounterDelta[float64] +} + +// IPLDSchemaBytes returns the IPLD schema representation for the CRDT. +// +// This needs to match the [CRDT] struct or [mustSetSchema] will panic on init. 
+func (c CRDT) IPLDSchemaBytes() []byte { + return []byte(` + type CRDT union { + | LWWRegDelta "lww" + | CompositeDAGDelta "composite" + | CounterDeltaInt "counterInt" + | CounterDeltaFloat "counterFloat" + } representation keyed`) +} + +// GetDelta returns the delta that is stored in the CRDT. +func (c CRDT) GetDelta() core.Delta { + switch { + case c.LWWRegDelta != nil: + return c.LWWRegDelta + case c.CompositeDAGDelta != nil: + return c.CompositeDAGDelta + case c.CounterDeltaFloat != nil: + return c.CounterDeltaFloat + case c.CounterDeltaInt != nil: + return c.CounterDeltaInt + } + return nil +} + +// GetPriority returns the priority of the delta. +func (c CRDT) GetPriority() uint64 { + switch { + case c.LWWRegDelta != nil: + return c.LWWRegDelta.GetPriority() + case c.CompositeDAGDelta != nil: + return c.CompositeDAGDelta.GetPriority() + case c.CounterDeltaFloat != nil: + return c.CounterDeltaFloat.GetPriority() + case c.CounterDeltaInt != nil: + return c.CounterDeltaInt.GetPriority() + } + return 0 +} + +// GetFieldName returns the field name of the delta. +func (c CRDT) GetFieldName() string { + switch { + case c.LWWRegDelta != nil: + return c.LWWRegDelta.FieldName + case c.CompositeDAGDelta != nil: + return c.CompositeDAGDelta.FieldName + case c.CounterDeltaFloat != nil: + return c.CounterDeltaFloat.FieldName + case c.CounterDeltaInt != nil: + return c.CounterDeltaInt.FieldName + } + return "" +} + +// GetDocID returns the docID of the delta. +func (c CRDT) GetDocID() []byte { + switch { + case c.LWWRegDelta != nil: + return c.LWWRegDelta.DocID + case c.CompositeDAGDelta != nil: + return c.CompositeDAGDelta.DocID + case c.CounterDeltaFloat != nil: + return c.CounterDeltaFloat.DocID + case c.CounterDeltaInt != nil: + return c.CounterDeltaInt.DocID + } + return nil +} + +// GetSchemaVersionID returns the schema version ID of the delta. 
+func (c CRDT) GetSchemaVersionID() string { + switch { + case c.LWWRegDelta != nil: + return c.LWWRegDelta.SchemaVersionID + case c.CompositeDAGDelta != nil: + return c.CompositeDAGDelta.SchemaVersionID + case c.CounterDeltaFloat != nil: + return c.CounterDeltaFloat.SchemaVersionID + case c.CounterDeltaInt != nil: + return c.CounterDeltaInt.SchemaVersionID + } + return "" +} + +// GetStatus returns the status of the delta. +// +// Currently only implemented for CompositeDAGDelta. +func (c CRDT) GetStatus() uint8 { + if c.CompositeDAGDelta != nil { + return uint8(c.CompositeDAGDelta.Status) + } + return 0 +} + +// GetData returns the data of the delta. +// +// Currently only implemented for LWWRegDelta. +func (c CRDT) GetData() []byte { + if c.LWWRegDelta != nil { + return c.LWWRegDelta.Data + } + return nil +} + +// IsComposite returns true if the CRDT is a composite CRDT. +func (c CRDT) IsComposite() bool { + return c.CompositeDAGDelta != nil +} diff --git a/internal/core/crdt/lwwreg.go b/internal/core/crdt/lwwreg.go index 066c8e1523..0df8187dae 100644 --- a/internal/core/crdt/lwwreg.go +++ b/internal/core/crdt/lwwreg.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2024 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -14,12 +14,8 @@ import ( "bytes" "context" - dag "github.com/ipfs/boxo/ipld/merkledag" ds "github.com/ipfs/go-datastore" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ugorji/go/codec" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/core" @@ -41,6 +37,20 @@ type LWWRegDelta struct { var _ core.Delta = (*LWWRegDelta)(nil) +// IPLDSchemaBytes returns the IPLD schema representation for the type. +// +// This needs to match the [LWWRegDelta] struct or [coreblock.mustSetSchema] will panic on init. 
+func (delta LWWRegDelta) IPLDSchemaBytes() []byte { + return []byte(` + type LWWRegDelta struct { + docID Bytes + fieldName String + priority Int + schemaVersionID String + data Bytes + }`) +} + // GetPriority gets the current priority for this delta. func (delta *LWWRegDelta) GetPriority() uint64 { return delta.Priority @@ -51,26 +61,6 @@ func (delta *LWWRegDelta) SetPriority(prio uint64) { delta.Priority = prio } -// Marshal encodes the delta using CBOR. -// for now le'ts do cbor (quick to implement) -func (delta *LWWRegDelta) Marshal() ([]byte, error) { - h := &codec.CborHandle{} - buf := bytes.NewBuffer(nil) - enc := codec.NewEncoder(buf, h) - err := enc.Encode(delta) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal decodes the delta from CBOR. -func (delta *LWWRegDelta) Unmarshal(b []byte) error { - h := &codec.CborHandle{} - dec := codec.NewDecoderBytes(b, h) - return dec.Decode(delta) -} - // LWWRegister, Last-Writer-Wins Register, is a simple CRDT type that allows set/get // of an arbitrary data type that ensures convergence. 
type LWWRegister struct { @@ -161,20 +151,3 @@ func (reg LWWRegister) setValue(ctx context.Context, val []byte, priority uint64 return reg.setPriority(ctx, reg.key, priority) } - -// DeltaDecode is a typed helper to extract -// a LWWRegDelta from a ipld.Node -// for now let's do cbor (quick to implement) -func (reg LWWRegister) DeltaDecode(node ipld.Node) (core.Delta, error) { - pbNode, ok := node.(*dag.ProtoNode) - if !ok { - return nil, client.NewErrUnexpectedType[*dag.ProtoNode]("ipld.Node", node) - } - - delta := &LWWRegDelta{} - err := delta.Unmarshal(pbNode.Data()) - if err != nil { - return nil, err - } - return delta, nil -} diff --git a/internal/core/crdt/lwwreg_test.go b/internal/core/crdt/lwwreg_test.go index c51fda6a46..2083a5b800 100644 --- a/internal/core/crdt/lwwreg_test.go +++ b/internal/core/crdt/lwwreg_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2024 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. 
@@ -15,12 +15,7 @@ import ( "reflect" "testing" - dag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" - ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" - "github.com/ugorji/go/codec" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" @@ -148,109 +143,3 @@ func TestLWWRegisterDeltaSetPriority(t *testing.T) { ) } } - -func TestLWWRegisterDeltaMarshal(t *testing.T) { - delta := &LWWRegDelta{ - Data: []byte("test"), - Priority: uint64(10), - } - bytes, err := delta.Marshal() - if err != nil { - t.Errorf("Marshal returned an error: %v", err) - return - } - if len(bytes) == 0 { - t.Error("Expected Marshal to return serialized bytes, but output is empty") - return - } - - h := &codec.CborHandle{} - dec := codec.NewDecoderBytes(bytes, h) - unmarshaledDelta := &LWWRegDelta{} - err = dec.Decode(unmarshaledDelta) - if err != nil { - t.Errorf("Decode returned an error: %v", err) - return - } - - if !reflect.DeepEqual(delta.Data, unmarshaledDelta.Data) { - t.Errorf( - "Unmarshalled data value doesn't match expected. Want %v, have %v", - []byte("test"), - unmarshaledDelta.Data, - ) - return - } - - if delta.Priority != unmarshaledDelta.Priority { - t.Errorf( - "Unmarshalled priority value doesn't match. 
Want %v, have %v", - uint64(10), - unmarshaledDelta.Priority, - ) - } -} - -func makeNode(delta core.Delta, heads []cid.Cid) (ipld.Node, error) { - var data []byte - var err error - if delta != nil { - data, err = delta.Marshal() - if err != nil { - return nil, err - } - } - - nd := dag.NodeWithData(data) - // The cid builder defaults to v0, we want to be using v1 CIDs - nd.SetCidBuilder( - cid.V1Builder{ - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, - }) - - for _, h := range heads { - err = nd.AddRawLink("", &ipld.Link{Cid: h}) - if err != nil { - return nil, err - } - } - - return nd, nil -} - -func TestLWWRegisterDeltaDecode(t *testing.T) { - delta := &LWWRegDelta{ - Data: []byte("test"), - Priority: uint64(10), - } - - node, err := makeNode(delta, []cid.Cid{}) - if err != nil { - t.Errorf("Received errors while creating node: %v", err) - return - } - - reg := LWWRegister{} - extractedDelta, err := reg.DeltaDecode(node) - if err != nil { - t.Errorf("Received error while extracing node: %v", err) - return - } - - typedExtractedDelta, ok := extractedDelta.(*LWWRegDelta) - if !ok { - t.Error("Extracted delta from node is NOT a LWWRegDelta type") - return - } - - if !reflect.DeepEqual(typedExtractedDelta, delta) { - t.Errorf( - "Extracted delta is not the same value as the original. Expected %v, have %v", - delta, - typedExtractedDelta, - ) - return - } -} diff --git a/internal/core/delta.go b/internal/core/delta.go index fda7dd13ae..16cba6ce53 100644 --- a/internal/core/delta.go +++ b/internal/core/delta.go @@ -10,27 +10,9 @@ package core -import ( - cid "github.com/ipfs/go-cid" -) - // Delta represents a delta-state update to delta-CRDT. // They are serialized to and from Protobuf (or CBOR). type Delta interface { GetPriority() uint64 SetPriority(uint64) - Marshal() ([]byte, error) - Unmarshal(b []byte) error -} - -// CompositeDelta represents a delta-state update to a composite CRDT. 
-type CompositeDelta interface { - Delta - Links() []DAGLink -} - -// DAGLink represents a link to another object in a DAG. -type DAGLink struct { - Name string - Cid cid.Cid } diff --git a/internal/core/node.go b/internal/core/node.go deleted file mode 100644 index 6e9589ea04..0000000000 --- a/internal/core/node.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package core - -import ( - "context" - - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" -) - -// NodeDeltaPair is a Node with its underlying delta already extracted. -// Used in a channel response for streaming. -type NodeDeltaPair interface { - GetNode() ipld.Node - GetDelta() Delta - Error() error -} - -// A NodeGetter extended from ipld.NodeGetter with delta-related functions. -type NodeGetter interface { - ipld.NodeGetter - GetDelta(context.Context, cid.Cid) (ipld.Node, Delta, error) - GetDeltas(context.Context, []cid.Cid) <-chan NodeDeltaPair - GetPriority(context.Context, cid.Cid) (uint64, error) -} diff --git a/internal/core/replicated.go b/internal/core/replicated.go index 75a72ece7f..f3f1b89f2c 100644 --- a/internal/core/replicated.go +++ b/internal/core/replicated.go @@ -13,20 +13,18 @@ package core import ( "context" - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" + cid "github.com/ipld/go-ipld-prime/linking/cid" ) // ReplicatedData is a data type that allows concurrent writers to deterministically merge other // replicated data so as to converge on the same state. 
type ReplicatedData interface { Merge(ctx context.Context, other Delta) error - DeltaDecode(node ipld.Node) (Delta, error) // possibly rename to just Decode Value(ctx context.Context) ([]byte, error) } // PersistedReplicatedData persists a ReplicatedData to an underlying datastore. type PersistedReplicatedData interface { ReplicatedData - Publish(Delta) (cid.Cid, error) + Publish(Delta) (cid.Link, error) } diff --git a/internal/db/collection.go b/internal/db/collection.go index 40f020a13c..20cead8193 100644 --- a/internal/db/collection.go +++ b/internal/db/collection.go @@ -23,7 +23,7 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" - ipld "github.com/ipfs/go-ipld-format" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" @@ -33,6 +33,7 @@ import ( "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/internal/acp" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/db/description" "github.com/sourcenetwork/defradb/internal/db/fetcher" @@ -1708,7 +1709,7 @@ func (c *collection) save( // => instantiate MerkleCRDT objects // => Set/Publish new CRDT values primaryKey := c.getPrimaryKeyFromDocID(doc.ID()) - links := make([]core.DAGLink, 0) + links := make([]coreblock.DAGLink, 0) for k, v := range doc.Fields() { val, err := doc.GetValueWithField(v) if err != nil { @@ -1773,20 +1774,16 @@ func (c *collection) save( return cid.Undef, err } - node, _, err := merkleCRDT.Save(ctx, val) + link, _, err := merkleCRDT.Save(ctx, val) if err != nil { return cid.Undef, err } - link := core.DAGLink{ - Name: k, - Cid: node.Cid(), - } - links = append(links, link) + links = append(links, coreblock.NewDAGLink(k, link)) } } - headNode, priority, err := 
c.saveCompositeToMerkleCRDT( + link, headNode, err := c.saveCompositeToMerkleCRDT( ctx, primaryKey.ToDataStoreKey(), links, @@ -1795,17 +1792,16 @@ func (c *collection) save( if err != nil { return cid.Undef, err } - if c.db.events.Updates.HasValue() { txn.OnSuccess( func() { c.db.events.Updates.Value().Publish( events.Update{ DocID: doc.ID().String(), - Cid: headNode.Cid(), + Cid: link.Cid, SchemaRoot: c.Schema().Root, Block: headNode, - Priority: priority, + IsCreate: isCreate, }, ) }, @@ -1813,10 +1809,10 @@ func (c *collection) save( } txn.OnSuccess(func() { - doc.SetHead(headNode.Cid()) + doc.SetHead(link.Cid) }) - return headNode.Cid(), nil + return link.Cid, nil } func (c *collection) validateOneToOneLinkDoesntAlreadyExist( @@ -1987,15 +1983,16 @@ func (c *collection) exists( } // saveCompositeToMerkleCRDT saves the composite to the merkle CRDT. +// It returns the CID of the block and the encoded block. // saveCompositeToMerkleCRDT MUST not be called outside the `c.save` // and `c.applyDelete` methods as we wrap the acp logic around those methods. // Calling it elsewhere could cause the omission of acp checks. 
func (c *collection) saveCompositeToMerkleCRDT( ctx context.Context, dsKey core.DataStoreKey, - links []core.DAGLink, + links []coreblock.DAGLink, status client.DocumentStatus, -) (ipld.Node, uint64, error) { +) (cidlink.Link, []byte, error) { txn := mustGetContextTxn(ctx) dsKey = dsKey.WithFieldId(core.COMPOSITE_NAMESPACE) merkleCRDT := merklecrdt.NewMerkleCompositeDAG( diff --git a/internal/db/collection_delete.go b/internal/db/collection_delete.go index 05a4144c0f..420b49c218 100644 --- a/internal/db/collection_delete.go +++ b/internal/db/collection_delete.go @@ -13,10 +13,13 @@ package db import ( "context" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/internal/acp" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/merkle/clock" ) @@ -148,15 +151,12 @@ func (c *collection) applyDelete( return err } - dagLinks := make([]core.DAGLink, len(cids)) + dagLinks := make([]coreblock.DAGLink, len(cids)) for i, cid := range cids { - dagLinks[i] = core.DAGLink{ - Name: core.HEAD, - Cid: cid, - } + dagLinks[i] = coreblock.NewDAGLink(core.HEAD, cidlink.Link{Cid: cid}) } - headNode, priority, err := c.saveCompositeToMerkleCRDT( + link, b, err := c.saveCompositeToMerkleCRDT( ctx, dsKey, dagLinks, @@ -172,10 +172,9 @@ func (c *collection) applyDelete( c.db.events.Updates.Value().Publish( events.Update{ DocID: primaryKey.DocID, - Cid: headNode.Cid(), + Cid: link.Cid, SchemaRoot: c.Schema().Root, - Block: headNode, - Priority: priority, + Block: b, }, ) }, diff --git a/internal/db/db.go b/internal/db/db.go index 4721e40e48..ec7aef485e 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -19,10 +19,10 @@ import ( "sync" "sync/atomic" - blockstore "github.com/ipfs/boxo/blockstore" ds "github.com/ipfs/go-datastore" dsq 
"github.com/ipfs/go-datastore/query"
 	"github.com/lens-vm/lens/host-go/engine/module"
+
+	"github.com/sourcenetwork/corelog"
 	"github.com/sourcenetwork/immutable"
@@ -140,7 +140,7 @@ func (db *db) Root() datastore.RootStore {
 }
 
 // Blockstore returns the internal DAG store which contains IPLD blocks.
-func (db *db) Blockstore() blockstore.Blockstore {
+func (db *db) Blockstore() datastore.DAGStore {
 	return db.multistore.DAGstore()
 }
 
@@ -149,6 +149,11 @@ func (db *db) Peerstore() datastore.DSBatching {
 	return db.multistore.Peerstore()
 }
 
+// Headstore returns the internal headstore which contains the current document heads.
+func (db *db) Headstore() ds.Read {
+	return db.multistore.Headstore()
+}
+
 func (db *db) LensRegistry() client.LensRegistry {
 	return db.lensRegistry
 }
diff --git a/internal/db/fetcher/mocks/fetcher.go b/internal/db/fetcher/mocks/fetcher.go
index 5f0019befb..fc68f152d0 100644
--- a/internal/db/fetcher/mocks/fetcher.go
+++ b/internal/db/fetcher/mocks/fetcher.go
@@ -3,8 +3,8 @@
 package mocks
 
 import (
-	acp "github.com/sourcenetwork/defradb/internal/acp"
 	client "github.com/sourcenetwork/defradb/client"
+	acp "github.com/sourcenetwork/defradb/internal/acp"
 
 	context "context"
 
diff --git a/internal/db/fetcher/versioned.go b/internal/db/fetcher/versioned.go
index 5c81cea62b..fd907658aa 100644
--- a/internal/db/fetcher/versioned.go
+++ b/internal/db/fetcher/versioned.go
@@ -14,20 +14,19 @@ import (
 	"container/list"
 	"context"
 
-	dag "github.com/ipfs/boxo/ipld/merkledag"
 	"github.com/ipfs/go-cid"
 	ds "github.com/ipfs/go-datastore"
-	format "github.com/ipfs/go-ipld-format"
+	cidlink "github.com/ipld/go-ipld-prime/linking/cid"
 
 	"github.com/sourcenetwork/immutable"
 
 	"github.com/sourcenetwork/defradb/client"
 	"github.com/sourcenetwork/defradb/datastore"
 	"github.com/sourcenetwork/defradb/datastore/memory"
-	"github.com/sourcenetwork/defradb/errors"
 	"github.com/sourcenetwork/defradb/internal/acp"
 	acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity"
"github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" "github.com/sourcenetwork/defradb/internal/planner/mapper" @@ -307,20 +306,14 @@ func (vf *VersionedFetcher) seekNext(c cid.Cid, topParent bool) error { } // decode the block - nd, err := dag.DecodeProtobuf(blk.RawData()) + block, err := coreblock.GetFromBytes(blk.RawData()) if err != nil { return NewErrVFetcherFailedToDecodeNode(err) } - // subDAGLinks := make([]cid.Cid, 0) // @todo: set slice size - l, err := nd.GetNodeLink(core.HEAD) - // ErrLinkNotFound is fine, it just means we have no more head links - if err != nil && !errors.Is(err, dag.ErrLinkNotFound) { - return NewErrVFetcherFailedToGetDagLink(err) - } - // only seekNext on parent if we have a HEAD link - if !errors.Is(err, dag.ErrLinkNotFound) { + l, ok := block.GetLinkByName(core.HEAD) + if ok { err := vf.seekNext(l.Cid, true) if err != nil { return err @@ -328,12 +321,12 @@ func (vf *VersionedFetcher) seekNext(c cid.Cid, topParent bool) error { } // loop over links and ignore head links - for _, l := range nd.Links() { + for _, l := range block.Links { if l.Name == core.HEAD { continue } - err := vf.seekNext(l.Cid, false) + err := vf.seekNext(l.Link.Cid, false) if err != nil { return err } @@ -353,25 +346,30 @@ func (vf *VersionedFetcher) seekNext(c cid.Cid, topParent bool) error { // Currently we assume the CID is a CompositeDAG CRDT node. 
func (vf *VersionedFetcher) merge(c cid.Cid) error { // get node - nd, err := vf.getDAGNode(c) + block, err := vf.getDAGBlock(c) + if err != nil { + return err + } + + link, err := block.GenerateLink() if err != nil { return err } // first arg 0 is the index for the composite DAG in the mCRDTs cache - if err := vf.processNode(0, nd, client.COMPOSITE, client.FieldKind_None, ""); err != nil { + if err := vf.processBlock(0, block, link, client.COMPOSITE, client.FieldKind_None, ""); err != nil { return err } // handle subgraphs // loop over links and ignore head links - for _, l := range nd.Links() { + for _, l := range block.Links { if l.Name == core.HEAD { continue } // get node - subNd, err := vf.getDAGNode(l.Cid) + subBlock, err := vf.getDAGBlock(l.Link.Cid) if err != nil { return err } @@ -380,7 +378,7 @@ func (vf *VersionedFetcher) merge(c cid.Cid) error { if !ok { return client.NewErrFieldNotExist(l.Name) } - if err := vf.processNode(uint32(field.ID), subNd, field.Typ, field.Kind, l.Name); err != nil { + if err := vf.processBlock(uint32(field.ID), subBlock, l.Link, field.Typ, field.Kind, l.Name); err != nil { return err } } @@ -388,9 +386,10 @@ func (vf *VersionedFetcher) merge(c cid.Cid) error { return nil } -func (vf *VersionedFetcher) processNode( +func (vf *VersionedFetcher) processBlock( crdtIndex uint32, - nd format.Node, + block *coreblock.Block, + blockLink cidlink.Link, ctype client.CType, kind client.FieldKind, fieldName string, @@ -414,28 +413,20 @@ func (vf *VersionedFetcher) processNode( return err } vf.mCRDTs[crdtIndex] = mcrdt - // compositeClock = compMCRDT } - delta, err := mcrdt.DeltaDecode(nd) - if err != nil { - return err - } - - err = mcrdt.Clock().ProcessNode(vf.ctx, delta, nd) + err = mcrdt.Clock().ProcessBlock(vf.ctx, block, blockLink) return err } -func (vf *VersionedFetcher) getDAGNode(c cid.Cid) (*dag.ProtoNode, error) { +func (vf *VersionedFetcher) getDAGBlock(c cid.Cid) (*coreblock.Block, error) { // get Block blk, err := 
vf.store.DAGstore().Get(vf.ctx, c) if err != nil { return nil, NewErrFailedToGetDagNode(err) } - // get node - // decode the block - return dag.DecodeProtobuf(blk.RawData()) + return coreblock.GetFromBytes(blk.RawData()) } // Close closes the VersionedFetcher. diff --git a/internal/db/indexed_docs_test.go b/internal/db/indexed_docs_test.go index fe51e7b354..5c4baacdf8 100644 --- a/internal/db/indexed_docs_test.go +++ b/internal/db/indexed_docs_test.go @@ -18,6 +18,7 @@ import ( ipfsDatastore "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" + "github.com/ipld/go-ipld-prime/storage/bsadapter" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -317,6 +318,9 @@ func TestNonUnique_IfFailsToStoredIndexedDoc_Error(t *testing.T) { key := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName).Doc(doc).Build() mockTxn := f.mockTxn() + a := &mocks.DAGStore{} + mockTxn.MockDAGstore.EXPECT().AsIPLDStorage().Return(&bsadapter.Adapter{Wrapped: a}) + a.EXPECT().Put(mock.Anything, mock.Anything).Return(nil) dataStoreOn := mockTxn.MockDatastore.EXPECT() dataStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Unset() @@ -358,6 +362,10 @@ func TestNonUnique_IfSystemStorageHasInvalidIndexDescription_Error(t *testing.T) doc := f.newUserDoc("John", 21, f.users) mockTxn := f.mockTxn().ClearSystemStore() + a := &mocks.DAGStore{} + mockTxn.MockDAGstore.EXPECT().AsIPLDStorage().Return(&bsadapter.Adapter{Wrapped: a}) + a.EXPECT().Put(mock.Anything, mock.Anything).Return(nil) + systemStoreOn := mockTxn.MockSystemstore.EXPECT() systemStoreOn.Query(mock.Anything, mock.Anything). 
Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil) @@ -377,6 +385,10 @@ func TestNonUnique_IfSystemStorageFailsToReadIndexDesc_Error(t *testing.T) { testErr := errors.New("test error") mockTxn := f.mockTxn().ClearSystemStore() + a := &mocks.DAGStore{} + mockTxn.MockDAGstore.EXPECT().AsIPLDStorage().Return(&bsadapter.Adapter{Wrapped: a}) + a.EXPECT().Put(mock.Anything, mock.Anything).Return(nil) + systemStoreOn := mockTxn.MockSystemstore.EXPECT() systemStoreOn.Query(mock.Anything, mock.Anything). Return(nil, testErr) diff --git a/internal/merkle/clock/clock.go b/internal/merkle/clock/clock.go index 5fa6621cd4..087ba76804 100644 --- a/internal/merkle/clock/clock.go +++ b/internal/merkle/clock/clock.go @@ -16,13 +16,14 @@ package clock import ( "context" - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" ) var ( @@ -44,7 +45,7 @@ func NewMerkleClock( dagstore datastore.DAGStore, namespace core.HeadStoreKey, crdt core.ReplicatedData, -) core.MerkleClock { +) *MerkleClock { return &MerkleClock{ headstore: headstore, dagstore: dagstore, @@ -55,95 +56,88 @@ func NewMerkleClock( func (mc *MerkleClock) putBlock( ctx context.Context, - heads []cid.Cid, - delta core.Delta, -) (ipld.Node, error) { - node, err := makeNode(delta, heads) - if err != nil { - return nil, NewErrCreatingBlock(err) - } - - // @todo Add a DagSyncer instance to the MerkleCRDT structure - // @body At the moment there is no configured DagSyncer so MerkleClock - // blocks are not persisted into the database. 
- // The following is an example implementation of how it might work: - // - // ctx := context.Background() - // err = mc.store.dagSyncer.Add(ctx, node) - // if err != nil { - // return nil, errors.Wrap("error writing new block %s ", node.Cid(), err) - // } - err = mc.dagstore.Put(ctx, node) + block *coreblock.Block, +) (cidlink.Link, error) { + nd := block.GenerateNode() + lsys := cidlink.DefaultLinkSystem() + lsys.SetWriteStorage(mc.dagstore.AsIPLDStorage()) + link, err := lsys.Store(linking.LinkContext{Ctx: ctx}, coreblock.GetLinkPrototype(), nd) if err != nil { - return nil, NewErrWritingBlock(node.Cid(), err) + return cidlink.Link{}, NewErrWritingBlock(err) } - return node, nil + return link.(cidlink.Link), nil } -// @todo Change AddDAGNode to AddDelta - -// AddDAGNode adds a new delta to the existing DAG for this MerkleClock: checks the current heads, -// sets the delta priority in the Merkle DAG, and adds it to the blockstore the runs ProcessNode. -func (mc *MerkleClock) AddDAGNode( +// AddDelta adds a new delta to the existing DAG for this MerkleClock: checks the current heads, +// sets the delta priority in the Merkle DAG, and adds it to the blockstore then runs ProcessBlock. +func (mc *MerkleClock) AddDelta( ctx context.Context, delta core.Delta, -) (ipld.Node, error) { + links ...coreblock.DAGLink, +) (cidlink.Link, []byte, error) { heads, height, err := mc.headset.List(ctx) if err != nil { - return nil, NewErrGettingHeads(err) + return cidlink.Link{}, nil, NewErrGettingHeads(err) } height = height + 1 delta.SetPriority(height) + block := coreblock.New(delta, links, heads...) - // write the delta and heads to a new block - nd, err := mc.putBlock(ctx, heads, delta) + // Write the new block to the dag store. 
+ link, err := mc.putBlock(ctx, block) if err != nil { - return nil, err + return cidlink.Link{}, nil, err } - // apply the new node and merge the delta with state - err = mc.ProcessNode( + // merge the delta and update the state + err = mc.ProcessBlock( ctx, - delta, - nd, + block, + link, ) + if err != nil { + return cidlink.Link{}, nil, err + } - return nd, err //@todo: Include raw block data in return + b, err := block.Marshal() + if err != nil { + return cidlink.Link{}, nil, err + } + + return link, b, err } -// ProcessNode processes an already merged delta into a CRDT by adding it to the state. -func (mc *MerkleClock) ProcessNode( +// ProcessBlock merges the delta CRDT and updates the state accordingly. +func (mc *MerkleClock) ProcessBlock( ctx context.Context, - delta core.Delta, - node ipld.Node, + block *coreblock.Block, + blockLink cidlink.Link, ) error { - nodeCid := node.Cid() - priority := delta.GetPriority() + priority := block.Delta.GetPriority() - err := mc.crdt.Merge(ctx, delta) + err := mc.crdt.Merge(ctx, block.Delta.GetDelta()) if err != nil { - return NewErrMergingDelta(nodeCid, err) + return NewErrMergingDelta(blockLink.Cid, err) } - links := node.Links() // check if we have any HEAD links hasHeads := false - for _, l := range links { + for _, l := range block.Links { if l.Name == "_head" { hasHeads = true break } } if !hasHeads { // reached the bottom, at a leaf - err := mc.headset.Write(ctx, nodeCid, priority) + err := mc.headset.Write(ctx, blockLink.Cid, priority) if err != nil { - return NewErrAddingHead(nodeCid, err) + return NewErrAddingHead(blockLink.Cid, err) } } - for _, l := range links { + for _, l := range block.Links { linkCid := l.Cid isHead, err := mc.headset.IsHead(ctx, linkCid) if err != nil { @@ -153,9 +147,9 @@ func (mc *MerkleClock) ProcessNode( if isHead { // reached one of the current heads, replace it with the tip // of current branch - err = mc.headset.Replace(ctx, linkCid, nodeCid, priority) + err = 
mc.headset.Replace(ctx, linkCid, blockLink.Cid, priority) if err != nil { - return NewErrReplacingHead(linkCid, nodeCid, err) + return NewErrReplacingHead(linkCid, blockLink.Cid, err) } continue @@ -168,13 +162,13 @@ func (mc *MerkleClock) ProcessNode( if known { // we reached a non-head node in the known tree. // This means our root block is a new head - err := mc.headset.Write(ctx, nodeCid, priority) + err := mc.headset.Write(ctx, blockLink.Cid, priority) if err != nil { log.ErrorContextE( ctx, "Failure adding head (when root is a new head)", err, - corelog.Any("Root", nodeCid), + corelog.Any("Root", blockLink.Cid), ) // OR should this also return like below comment?? // return nil, errors.Wrap("error adding head (when root is new head): %s ", root, err) diff --git a/internal/merkle/clock/clock_test.go b/internal/merkle/clock/clock_test.go index e6a882c931..c9a51a7a1e 100644 --- a/internal/merkle/clock/clock_test.go +++ b/internal/merkle/clock/clock_test.go @@ -20,6 +20,7 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" ccid "github.com/sourcenetwork/defradb/internal/core/cid" "github.com/sourcenetwork/defradb/internal/core/crdt" ) @@ -38,14 +39,14 @@ func newTestMerkleClock() *MerkleClock { multistore.DAGstore(), core.HeadStoreKey{DocID: request.DocIDArgName, FieldId: "1"}, reg, - ).(*MerkleClock) + ) } func TestNewMerkleClock(t *testing.T) { s := newDS() multistore := datastore.MultiStoreFrom(s) reg := crdt.NewLWWRegister(multistore.Rootstore(), core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") - clk := NewMerkleClock(multistore.Headstore(), multistore.DAGstore(), core.HeadStoreKey{}, reg).(*MerkleClock) + clk := NewMerkleClock(multistore.Headstore(), multistore.DAGstore(), core.HeadStoreKey{}, reg) if clk.headstore != multistore.Headstore() { t.Error("MerkleClock store not 
correctly set") @@ -61,13 +62,14 @@ func TestMerkleClockPutBlock(t *testing.T) { clk := newTestMerkleClock() reg := crdt.LWWRegister{} delta := reg.Set([]byte("test")) - node, err := clk.putBlock(ctx, nil, delta) + block := coreblock.New(delta, nil) + _, err := clk.putBlock(ctx, block) if err != nil { t.Errorf("Failed to putBlock, err: %v", err) } - if len(node.Links()) != 0 { - t.Errorf("Node links should be empty. Have %v, want %v", len(node.Links()), 0) + if len(block.Links) != 0 { + t.Errorf("Node links should be empty. Have %v, want %v", len(block.Links), 0) return } @@ -87,37 +89,38 @@ func TestMerkleClockPutBlockWithHeads(t *testing.T) { return } heads := []cid.Cid{c} - node, err := clk.putBlock(ctx, heads, delta) + block := coreblock.New(delta, nil, heads...) + _, err = clk.putBlock(ctx, block) if err != nil { t.Error("Failed to putBlock with heads:", err) return } - if len(node.Links()) != 1 { - t.Errorf("putBlock has incorrect number of heads. Have %v, want %v", len(node.Links()), 1) + if len(block.Links) != 1 { + t.Errorf("putBlock has incorrect number of heads. 
Have %v, want %v", len(block.Links), 1) } } -func TestMerkleClockAddDAGNode(t *testing.T) { +func TestMerkleClockAddDelta(t *testing.T) { ctx := context.Background() clk := newTestMerkleClock() reg := crdt.LWWRegister{} delta := reg.Set([]byte("test")) - _, err := clk.AddDAGNode(ctx, delta) + _, _, err := clk.AddDelta(ctx, delta) if err != nil { t.Error("Failed to add dag node:", err) return } } -func TestMerkleClockAddDAGNodeWithHeads(t *testing.T) { +func TestMerkleClockAddDeltaWithHeads(t *testing.T) { ctx := context.Background() clk := newTestMerkleClock() reg := crdt.LWWRegister{} delta := reg.Set([]byte("test")) - _, err := clk.AddDAGNode(ctx, delta) + _, _, err := clk.AddDelta(ctx, delta) if err != nil { t.Error("Failed to add dag node:", err) return @@ -126,7 +129,7 @@ func TestMerkleClockAddDAGNodeWithHeads(t *testing.T) { reg2 := crdt.LWWRegister{} delta2 := reg2.Set([]byte("test2")) - _, err = clk.AddDAGNode(ctx, delta2) + _, _, err = clk.AddDelta(ctx, delta2) if err != nil { t.Error("Failed to add second dag node with err:", err) return @@ -134,7 +137,7 @@ func TestMerkleClockAddDAGNodeWithHeads(t *testing.T) { if delta.GetPriority() != 1 && delta2.GetPriority() != 2 { t.Errorf( - "AddDAGNOde failed with incorrect delta priority vals, want (%v) (%v), have (%v) (%v)", + "AddDelta failed with incorrect delta priority vals, want (%v) (%v), have (%v) (%v)", 1, 2, delta.GetPriority(), diff --git a/internal/merkle/clock/errors.go b/internal/merkle/clock/errors.go index d2c2e7a2bb..9903f777a9 100644 --- a/internal/merkle/clock/errors.go +++ b/internal/merkle/clock/errors.go @@ -45,8 +45,8 @@ func NewErrCreatingBlock(inner error) error { return errors.Wrap(errCreatingBlock, inner) } -func NewErrWritingBlock(cid cid.Cid, inner error) error { - return errors.Wrap(errWritingBlock, inner, errors.NewKV("Cid", cid)) +func NewErrWritingBlock(inner error) error { + return errors.Wrap(errWritingBlock, inner) } func NewErrGettingHeads(inner error) error { diff --git 
a/internal/merkle/clock/ipld.go b/internal/merkle/clock/ipld.go deleted file mode 100644 index edef74291c..0000000000 --- a/internal/merkle/clock/ipld.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clock - -import ( - "context" - - dag "github.com/ipfs/boxo/ipld/merkledag" - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" - - "github.com/sourcenetwork/defradb/internal/core" -) - -// Credit: This file is from github.com/ipfs/go-ds-crdt - -// IPLD related things - -var _ core.NodeGetter = (*CrdtNodeGetter)(nil) - -type DeltaExtractorFn func(ipld.Node) (core.Delta, error) - -// crdtNodeGetter wraps an ipld.NodeGetter with some additional utility methods -type CrdtNodeGetter struct { - ipld.NodeGetter - DeltaExtractor DeltaExtractorFn -} - -func (ng *CrdtNodeGetter) GetDelta(ctx context.Context, c cid.Cid) (ipld.Node, core.Delta, error) { - nd, err := ng.Get(ctx, c) - if err != nil { - return nil, nil, err - } - delta, err := ng.DeltaExtractor(nd) - return nd, delta, err -} - -// GetHeight returns the height of a block -func (ng *CrdtNodeGetter) GetPriority(ctx context.Context, c cid.Cid) (uint64, error) { - _, delta, err := ng.GetDelta(ctx, c) - if err != nil { - return 0, err - } - return delta.GetPriority(), nil -} - -type DeltaEntry struct { - Delta core.Delta - Node ipld.Node - Err error -} - -func (de DeltaEntry) GetNode() ipld.Node { - return de.Node -} - -func (de DeltaEntry) GetDelta() core.Delta { - return de.Delta -} - -func (de DeltaEntry) Error() error { - return de.Err -} - -// GetDeltas uses GetMany to obtain 
many deltas. -func (ng *CrdtNodeGetter) GetDeltas(ctx context.Context, cids []cid.Cid) <-chan core.NodeDeltaPair { - deltaOpts := make(chan core.NodeDeltaPair, 1) - go func() { - defer close(deltaOpts) - nodeOpts := ng.GetMany(ctx, cids) - for nodeOpt := range nodeOpts { - if nodeOpt.Err != nil { - deltaOpts <- &DeltaEntry{Err: nodeOpt.Err} - continue - } - delta, err := ng.DeltaExtractor(nodeOpt.Node) - if err != nil { - deltaOpts <- &DeltaEntry{Err: err} - continue - } - deltaOpts <- &DeltaEntry{ - Delta: delta, - Node: nodeOpt.Node, - } - } - }() - return deltaOpts -} - -// add this as a field to a NodeGetter so it can be typed to a specific -// delta type (ie. LWWRegisterDelta) -// func extractDelta(nd ipld.Node) (core.Delta, error) { -// protonode, ok := nd.(*dag.ProtoNode) -// if !ok { -// return nil, errors.New("node is not a ProtoNode") -// } -// d := &pb.Delta{} -// err := proto.Unmarshal(protonode.Data(), d) -// return d, err -// } - -func makeNode(delta core.Delta, heads []cid.Cid) (ipld.Node, error) { - var data []byte - var err error - if delta != nil { - data, err = delta.Marshal() - if err != nil { - return nil, err - } - } - - nd := dag.NodeWithData(data) - // The cid builder defaults to v0, we want to be using v1 Cids - err = nd.SetCidBuilder(cid.V1Builder{ - Codec: cid.DagProtobuf, - MhType: mh.SHA2_256, - MhLength: -1, - }) - if err != nil { - return nil, err - } - - // add heads - for _, h := range heads { - if err = nd.AddRawLink("_head", &ipld.Link{Cid: h}); err != nil { - return nil, err - } - } - - // add delta specific links - if comp, ok := delta.(core.CompositeDelta); ok { - for _, dagLink := range comp.Links() { - if err = nd.AddRawLink(dagLink.Name, &ipld.Link{Cid: dagLink.Cid}); err != nil { - return nil, err - } - } - } - return nd, nil -} - -// type LocalNodeGetter diff --git a/internal/merkle/clock/ipld_test.go b/internal/merkle/clock/ipld_test.go deleted file mode 100644 index 3136356f41..0000000000 --- 
a/internal/merkle/clock/ipld_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clock diff --git a/internal/merkle/crdt/composite.go b/internal/merkle/crdt/composite.go index 6912ccb9a0..33d6ab2d3c 100644 --- a/internal/merkle/crdt/composite.go +++ b/internal/merkle/crdt/composite.go @@ -13,10 +13,11 @@ package merklecrdt import ( "context" - ipld "github.com/ipfs/go-ipld-format" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" corecrdt "github.com/sourcenetwork/defradb/internal/core/crdt" "github.com/sourcenetwork/defradb/internal/merkle/clock" ) @@ -55,33 +56,25 @@ func NewMerkleCompositeDAG( // Delete sets the values of CompositeDAG for a delete. func (m *MerkleCompositeDAG) Delete( ctx context.Context, - links []core.DAGLink, -) (ipld.Node, uint64, error) { - // Set() call on underlying CompositeDAG CRDT - // persist/publish delta - delta := m.reg.Set(links) - delta.Status = client.Deleted - nd, err := m.clock.AddDAGNode(ctx, delta) + links []coreblock.DAGLink, +) (cidlink.Link, []byte, error) { + delta := m.reg.Set(client.Deleted) + link, b, err := m.clock.AddDelta(ctx, delta, links...) if err != nil { - return nil, 0, err + return cidlink.Link{}, nil, err } - return nd, delta.GetPriority(), nil + return link, b, nil } // Save the value of the composite CRDT to DAG. 
-func (m *MerkleCompositeDAG) Save(ctx context.Context, data any) (ipld.Node, uint64, error) { - value, ok := data.([]core.DAGLink) +func (m *MerkleCompositeDAG) Save(ctx context.Context, data any) (cidlink.Link, []byte, error) { + links, ok := data.([]coreblock.DAGLink) if !ok { - return nil, 0, NewErrUnexpectedValueType(client.COMPOSITE, []core.DAGLink{}, data) - } - // Set() call on underlying CompositeDAG CRDT - // persist/publish delta - delta := m.reg.Set(value) - nd, err := m.clock.AddDAGNode(ctx, delta) - if err != nil { - return nil, 0, err + return cidlink.Link{}, nil, NewErrUnexpectedValueType(client.COMPOSITE, []coreblock.DAGLink{}, data) } - return nd, delta.GetPriority(), nil + delta := m.reg.Set(client.Active) + + return m.clock.AddDelta(ctx, delta, links...) } diff --git a/internal/merkle/crdt/counter.go b/internal/merkle/crdt/counter.go index d7b7e17302..4501de326c 100644 --- a/internal/merkle/crdt/counter.go +++ b/internal/merkle/crdt/counter.go @@ -13,7 +13,7 @@ package merklecrdt import ( "context" - ipld "github.com/ipfs/go-ipld-format" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/core" @@ -47,15 +47,14 @@ func NewMerkleCounter[T crdt.Incrementable]( } // Save the value of the Counter to the DAG. 
-func (mc *MerkleCounter[T]) Save(ctx context.Context, data any) (ipld.Node, uint64, error) { +func (mc *MerkleCounter[T]) Save(ctx context.Context, data any) (cidlink.Link, []byte, error) { value, ok := data.(*client.FieldValue) if !ok { - return nil, 0, NewErrUnexpectedValueType(mc.reg.CType(), &client.FieldValue{}, data) + return cidlink.Link{}, nil, NewErrUnexpectedValueType(mc.reg.CType(), &client.FieldValue{}, data) } delta, err := mc.reg.Increment(ctx, value.Value().(T)) if err != nil { - return nil, 0, err + return cidlink.Link{}, nil, err } - nd, err := mc.clock.AddDAGNode(ctx, delta) - return nd, delta.GetPriority(), err + return mc.clock.AddDelta(ctx, delta) } diff --git a/internal/merkle/crdt/lwwreg.go b/internal/merkle/crdt/lwwreg.go index 04b2cf0f04..6755eac639 100644 --- a/internal/merkle/crdt/lwwreg.go +++ b/internal/merkle/crdt/lwwreg.go @@ -13,7 +13,7 @@ package merklecrdt import ( "context" - ipld "github.com/ipfs/go-ipld-format" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/core" @@ -46,18 +46,17 @@ func NewMerkleLWWRegister( } // Save the value of the register to the DAG. 
-func (mlwwreg *MerkleLWWRegister) Save(ctx context.Context, data any) (ipld.Node, uint64, error) { +func (mlwwreg *MerkleLWWRegister) Save(ctx context.Context, data any) (cidlink.Link, []byte, error) { value, ok := data.(*client.FieldValue) if !ok { - return nil, 0, NewErrUnexpectedValueType(client.LWW_REGISTER, &client.FieldValue{}, data) + return cidlink.Link{}, nil, NewErrUnexpectedValueType(client.LWW_REGISTER, &client.FieldValue{}, data) } bytes, err := value.Bytes() if err != nil { - return nil, 0, err + return cidlink.Link{}, nil, err } // Set() call on underlying LWWRegister CRDT // persist/publish delta delta := mlwwreg.reg.Set(bytes) - nd, err := mlwwreg.clock.AddDAGNode(ctx, delta) - return nd, delta.GetPriority(), err + return mlwwreg.clock.AddDelta(ctx, delta) } diff --git a/internal/merkle/crdt/merklecrdt.go b/internal/merkle/crdt/merklecrdt.go index 6d1ba22fdf..ed8452195f 100644 --- a/internal/merkle/crdt/merklecrdt.go +++ b/internal/merkle/crdt/merklecrdt.go @@ -16,11 +16,12 @@ package merklecrdt import ( "context" - ipld "github.com/ipfs/go-ipld-format" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" ) type Stores interface { @@ -34,20 +35,31 @@ type Stores interface { // so it can be merged with any given semantics. type MerkleCRDT interface { core.ReplicatedData - Clock() core.MerkleClock - Save(ctx context.Context, data any) (ipld.Node, uint64, error) + Clock() MerkleClock + Save(ctx context.Context, data any) (cidlink.Link, []byte, error) +} + +// MerkleClock is the logical clock implementation that manages writing to and from +// the MerkleDAG structure, ensuring a causal ordering of events. 
+type MerkleClock interface { + AddDelta( + ctx context.Context, + delta core.Delta, + links ...coreblock.DAGLink, + ) (cidlink.Link, []byte, error) + ProcessBlock(context.Context, *coreblock.Block, cidlink.Link) error } // baseMerkleCRDT handles the MerkleCRDT overhead functions that aren't CRDT specific like the mutations and state // retrieval functions. It handles creating and publishing the CRDT DAG with the help of the MerkleClock. type baseMerkleCRDT struct { - clock core.MerkleClock + clock MerkleClock crdt core.ReplicatedData } var _ core.ReplicatedData = (*baseMerkleCRDT)(nil) -func (base *baseMerkleCRDT) Clock() core.MerkleClock { +func (base *baseMerkleCRDT) Clock() MerkleClock { return base.clock } @@ -55,10 +67,6 @@ func (base *baseMerkleCRDT) Merge(ctx context.Context, other core.Delta) error { return base.crdt.Merge(ctx, other) } -func (base *baseMerkleCRDT) DeltaDecode(node ipld.Node) (core.Delta, error) { - return base.crdt.DeltaDecode(node) -} - func (base *baseMerkleCRDT) Value(ctx context.Context) ([]byte, error) { return base.crdt.Value(ctx) } diff --git a/internal/merkle/crdt/merklecrdt_test.go b/internal/merkle/crdt/merklecrdt_test.go index fafcfc5905..bd42223509 100644 --- a/internal/merkle/crdt/merklecrdt_test.go +++ b/internal/merkle/crdt/merklecrdt_test.go @@ -42,14 +42,14 @@ func TestMerkleCRDTPublish(t *testing.T) { reg := crdt.LWWRegister{} delta := reg.Set([]byte("test")) - nd, err := bCRDT.clock.AddDAGNode(ctx, delta) + link, _, err := bCRDT.clock.AddDelta(ctx, delta) if err != nil { t.Error("Failed to publish delta to MerkleCRDT:", err) return } - if nd.Cid() == cid.Undef { - t.Error("Published returned invalid CID Undef:", nd.Cid()) + if link.Cid == cid.Undef { + t.Error("Published returned invalid CID Undef:", link.Cid) return } } diff --git a/internal/planner/commit.go b/internal/planner/commit.go index 9da7324f8c..2f9de44bac 100644 --- a/internal/planner/commit.go +++ b/internal/planner/commit.go @@ -11,16 +11,15 @@ package 
planner import ( - "github.com/fxamacker/cbor/v2" - dag "github.com/ipfs/boxo/ipld/merkledag" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/fetcher" "github.com/sourcenetwork/defradb/internal/planner/mapper" ) @@ -217,7 +216,12 @@ func (n *dagScanNode) Next() (bool, error) { return false, err } - currentValue, heads, err := n.dagBlockToNodeDoc(block) + dagBlock, err := coreblock.GetFromBytes(block.RawData()) + if err != nil { + return false, err + } + + currentValue, heads, err := n.dagBlockToNodeDoc(dagBlock) if err != nil { return false, err } @@ -238,7 +242,8 @@ func (n *dagScanNode) Next() (bool, error) { n.queuedCids = append(make([]*cid.Cid, len(heads)), n.queuedCids...) for i, h := range heads { - n.queuedCids[len(heads)-i-1] = &h.Cid + link := h // TODO remove when Go 1.22 #2431 + n.queuedCids[len(heads)-i-1] = &link.Cid } } @@ -289,46 +294,28 @@ which returns the current dag commit for the stored CRDT value. 
All the dagScanNode endpoints use similar structures */ -func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.Link, error) { +func (n *dagScanNode) dagBlockToNodeDoc(block *coreblock.Block) (core.Doc, []cidlink.Link, error) { commit := n.commitSelect.DocumentMapping.NewDoc() - cid := block.Cid() - n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.CidFieldName, cid.String()) - - // decode the delta, get the priority and payload - nd, err := dag.DecodeProtobuf(block.RawData()) + link, err := block.GenerateLink() if err != nil { return core.Doc{}, nil, err } + n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.CidFieldName, link.String()) - // @todo: Wrap delta unmarshaling into a proper typed interface. - var delta map[string]any - if err := cbor.Unmarshal(nd.Data(), &delta); err != nil { - return core.Doc{}, nil, err - } - - prio, ok := delta[request.DeltaArgPriority].(uint64) - if !ok { - return core.Doc{}, nil, ErrDeltaMissingPriority - } + prio := block.Delta.GetPriority() - schemaVersionId, ok := delta[request.DeltaArgSchemaVersionID].(string) - if !ok { - return core.Doc{}, nil, ErrDeltaMissingSchemaVersionID - } + schemaVersionId := block.Delta.GetSchemaVersionID() n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.SchemaVersionIDFieldName, schemaVersionId) - fieldName, ok := delta[request.DeltaArgFieldName] - if !ok { - return core.Doc{}, nil, ErrDeltaMissingFieldName - } + var fieldName any var fieldID string - switch fieldName { - case "": + if block.Delta.CompositeDAGDelta != nil { fieldID = core.COMPOSITE_NAMESPACE fieldName = nil - - default: + } else { + fName := block.Delta.GetFieldName() + fieldName = fName cols, err := n.planner.db.GetCollections( n.planner.ctx, client.CollectionFetchOptions{ @@ -345,22 +332,25 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L // Because we only care about the schema, we can safely take the first - the schema is the same 
// for all in the set. - field, ok := cols[0].Definition().GetFieldByName(fieldName.(string)) + field, ok := cols[0].Definition().GetFieldByName(fName) if !ok { - return core.Doc{}, nil, client.NewErrFieldNotExist(fieldName.(string)) + return core.Doc{}, nil, client.NewErrFieldNotExist(fName) } fieldID = field.ID.String() } - + // We need to explicitly set delta to an untyped nil otherwise it will be marshalled + // as an empty slice in the JSON response of the HTTP client. + d := block.Delta.GetData() + if d != nil { + n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.DeltaFieldName, d) + } else { + n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.DeltaFieldName, nil) + } n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.HeightFieldName, int64(prio)) - n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.DeltaFieldName, delta[request.DeltaArgData]) n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.FieldNameFieldName, fieldName) n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.FieldIDFieldName, fieldID) - docID, ok := delta[request.DeltaArgDocID].([]byte) - if !ok { - return core.Doc{}, nil, ErrDeltaMissingDocID - } + docID := block.Delta.GetDocID() n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.DocIDArgName, string(docID)) @@ -385,19 +375,19 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L n.commitSelect.DocumentMapping.SetFirstOfName(&commit, request.CollectionIDFieldName, int64(cols[0].ID())) - heads := make([]*ipld.Link, 0) + heads := make([]cidlink.Link, 0) // links linksIndexes := n.commitSelect.DocumentMapping.IndexesByName[request.LinksFieldName] for _, linksIndex := range linksIndexes { - links := make([]core.Doc, len(nd.Links())) + links := make([]core.Doc, len(block.Links)) linksMapping := n.commitSelect.DocumentMapping.ChildMappings[linksIndex] - for i, l := range nd.Links() { + for i, l := range block.Links { link =
linksMapping.NewDoc() linksMapping.SetFirstOfName(&link, request.LinksNameFieldName, l.Name) - linksMapping.SetFirstOfName(&link, request.LinksCidFieldName, l.Cid.String()) + linksMapping.SetFirstOfName(&link, request.LinksCidFieldName, l.Link.Cid.String()) links[i] = link } @@ -405,9 +395,9 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L commit.Fields[linksIndex] = links } - for _, l := range nd.Links() { + for _, l := range block.Links { if l.Name == "_head" { - heads = append(heads, l) + heads = append(heads, l.Link) } } diff --git a/net/client.go b/net/client.go index 414ee62e47..d29af6f60d 100644 --- a/net/client.go +++ b/net/client.go @@ -38,7 +38,7 @@ func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) er SchemaRoot: []byte(evt.SchemaRoot), Creator: s.peer.host.ID().String(), Log: &pb.Document_Log{ - Block: evt.Block.RawData(), + Block: evt.Block, }, } req := &pb.PushLogRequest{ diff --git a/net/client_test.go b/net/client_test.go index bedd28437d..e074947213 100644 --- a/net/client_test.go +++ b/net/client_test.go @@ -66,8 +66,7 @@ func TestPushlogWithDialFailure(t *testing.T) { DocID: id.String(), Cid: cid, SchemaRoot: "test", - Block: &EmptyNode{}, - Priority: 1, + Block: emptyBlock(), }, peer.ID("some-peer-id")) require.Contains(t, err.Error(), "no transport security set") } @@ -89,8 +88,7 @@ func TestPushlogWithInvalidPeerID(t *testing.T) { DocID: id.String(), Cid: cid, SchemaRoot: "test", - Block: &EmptyNode{}, - Priority: 1, + Block: emptyBlock(), }, peer.ID("some-peer-id")) require.Contains(t, err.Error(), "failed to parse peer ID") } @@ -138,8 +136,7 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { DocID: doc.ID().String(), Cid: cid, SchemaRoot: col.SchemaRoot(), - Block: &EmptyNode{}, - Priority: 1, + Block: emptyBlock(), }, n2.PeerInfo().ID) require.NoError(t, err) } diff --git a/net/dag.go b/net/dag.go index cc20629c0f..7718db6c27 100644 --- a/net/dag.go +++ b/net/dag.go @@ 
-13,43 +13,25 @@ package net import ( - "context" "sync" "time" "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/sourcenetwork/corelog" + + coreblock "github.com/sourcenetwork/defradb/internal/core/block" ) var ( DAGSyncTimeout = time.Second * 60 ) -// A DAGSyncer is an abstraction to an IPLD-based P2P storage layer. A -// DAGSyncer is a DAGService with the ability to publish new ipld nodes to the -// network, and retrieving others from it. -type DAGSyncer interface { - ipld.DAGService - // Returns true if the block is locally available (therefore, it - // is considered processed). - HasBlock(ctx context.Context, c cid.Cid) (bool, error) -} - -// A SessionDAGSyncer is a Sessions-enabled DAGSyncer. This type of DAG-Syncer -// provides an optimized NodeGetter to make multiple related requests. The -// same session-enabled NodeGetter is used to download DAG branches when -// the DAGSyncer supports it. -type SessionDAGSyncer interface { - DAGSyncer - Session(context.Context) ipld.NodeGetter -} - type dagJob struct { - session *sync.WaitGroup // A waitgroup to wait for all related jobs to conclude - bp *blockProcessor // the block processor to use - cid cid.Cid // the cid of the block to fetch from the P2P network - isComposite bool // whether this is a composite block + session *sync.WaitGroup // A waitgroup to wait for all related jobs to conclude + bp *blockProcessor // the block processor to use + cid cid.Cid // the cid of the block to fetch from the P2P network // OLD FIELDS // root cid.Cid // the root of the branch we are walking down @@ -108,8 +90,13 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { } go func(j *dagJob) { - if j.bp.getter != nil && j.cid.Defined() { - cNode, err := j.bp.getter.Get(p.ctx, j.cid) + if j.bp.dagSyncer != nil && j.cid.Defined() { + // BlockOfType will return the block if it is already in the store or fetch it from the network + // if it is not. 
This is a blocking call and will wait for the block to be fetched. + // It uses the LinkSystem to fetch the block. Blocks retrieved from the network will + // also be stored in the blockstore in the same call. + // Blocks have to match the coreblock.SchemaPrototype to be returned. + nd, err := j.bp.dagSyncer.BlockOfType(p.ctx, cidlink.Link{Cid: j.cid}, coreblock.SchemaPrototype) if err != nil { log.ErrorContextE( p.ctx, @@ -119,19 +106,19 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { j.session.Done() return } - err = j.bp.processRemoteBlock( - p.ctx, - j.session, - cNode, - j.isComposite, - ) + block, err := coreblock.GetFromNode(nd) if err != nil { log.ErrorContextE( p.ctx, - "Failed to process remote block", + "Failed to convert ipld node to block", err, corelog.Any("CID", j.cid)) } + j.bp.handleChildBlocks( + p.ctx, + j.session, + block, + ) } p.queuedChildren.Remove(j.cid) j.session.Done() diff --git a/net/dag_test.go b/net/dag_test.go index 09b2701fac..976f43653a 100644 --- a/net/dag_test.go +++ b/net/dag_test.go @@ -16,14 +16,11 @@ import ( "testing" "time" - dag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/merkle/clock" netutils "github.com/sourcenetwork/defradb/net/utils" ) @@ -186,28 +183,24 @@ func TestSendJobWorker_WithPeer_NoError(t *testing.T) { wg := sync.WaitGroup{} wg.Add(1) - var getter ipld.NodeGetter = n2.Peer.newDAGSyncerTxn(txn2) - if sessionMaker, ok := getter.(SessionDAGSyncer); ok { - getter = sessionMaker.Session(ctx) - } + fetcher := n2.Peer.newDAGSyncerTxn(txn2) n2.sendJobs <- &dagJob{ - bp: newBlockProcessor(n2.Peer, txn2, col, dsKey, getter), - session: &wg, - cid: heads[0], - isComposite: true, + 
bp: newBlockProcessor(n2.Peer, txn2, col, dsKey, fetcher), + session: &wg, + cid: heads[0], } wg.Wait() err = txn2.Commit(ctx) require.NoError(t, err) - block, err := n1.db.Blockstore().Get(ctx, heads[0]) + b, err := n1.db.Blockstore().Get(ctx, heads[0]) require.NoError(t, err) - nd, err := dag.DecodeProtobufBlock(block) + block, err := coreblock.GetFromBytes(b.RawData()) require.NoError(t, err) - for _, link := range nd.Links() { + for _, link := range block.Links { exists, err := n2.db.Blockstore().Has(ctx, link.Cid) require.NoError(t, err) require.True(t, exists) @@ -221,42 +214,3 @@ func TestSendJobWorker_WithPeer_NoError(t *testing.T) { t.Error("failed to close sendJobWorker") } } - -func makeNode(delta core.Delta, heads []cid.Cid) (ipld.Node, error) { - var data []byte - var err error - if delta != nil { - data, err = delta.Marshal() - if err != nil { - return nil, err - } - } - - nd := dag.NodeWithData(data) - // The cid builder defaults to v0, we want to be using v1 Cids - err = nd.SetCidBuilder(cid.V1Builder{ - Codec: cid.DagProtobuf, - MhType: mh.SHA2_256, - MhLength: -1, - }) - if err != nil { - return nil, err - } - - // add heads - for _, h := range heads { - if err = nd.AddRawLink("_head", &ipld.Link{Cid: h}); err != nil { - return nil, err - } - } - - // add delta specific links - if comp, ok := delta.(core.CompositeDelta); ok { - for _, dagLink := range comp.Links() { - if err = nd.AddRawLink(dagLink.Name, &ipld.Link{Cid: dagLink.Cid}); err != nil { - return nil, err - } - } - } - return nd, nil -} diff --git a/net/peer.go b/net/peer.go index e958e5c84d..3d728a1d87 100644 --- a/net/peer.go +++ b/net/peer.go @@ -21,10 +21,10 @@ import ( "github.com/ipfs/boxo/bitswap/network" "github.com/ipfs/boxo/blockservice" exchange "github.com/ipfs/boxo/exchange" - dag "github.com/ipfs/boxo/ipld/merkledag" + dagsyncer "github.com/ipfs/boxo/fetcher" + dagsyncerbs "github.com/ipfs/boxo/fetcher/impl/blockservice" "github.com/ipfs/go-cid" ds 
"github.com/ipfs/go-datastore" - ipld "github.com/ipfs/go-ipld-format" gostream "github.com/libp2p/go-libp2p-gostream" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" @@ -76,7 +76,7 @@ type Peer struct { mu sync.Mutex // peer DAG service - ipld.DAGService + dagsyncerbs.FetcherConfig exch exchange.Interface bserv blockservice.BlockService @@ -248,15 +248,11 @@ func (p *Peer) handleBroadcastLoop() { return } - // check log priority, 1 is new doc log - // 2 is update log var err error - if update.Priority == 1 { + if update.IsCreate { err = p.handleDocCreateLog(update) - } else if update.Priority > 1 { - err = p.handleDocUpdateLog(update) } else { - log.InfoContext(p.ctx, "Skipping log with invalid priority of 0", corelog.Any("CID", update.Cid)) + err = p.handleDocUpdateLog(update) } if err != nil { @@ -270,7 +266,7 @@ func (p *Peer) RegisterNewDocument( ctx context.Context, docID client.DocID, c cid.Cid, - nd ipld.Node, + rawBlock []byte, schemaRoot string, ) error { // register topic @@ -285,17 +281,16 @@ func (p *Peer) RegisterNewDocument( } // publish log - body := &pb.PushLogRequest_Body{ - DocID: []byte(docID.String()), - Cid: c.Bytes(), - SchemaRoot: []byte(schemaRoot), - Creator: p.host.ID().String(), - Log: &pb.Document_Log{ - Block: nd.RawData(), - }, - } req := &pb.PushLogRequest{ - Body: body, + Body: &pb.PushLogRequest_Body{ + DocID: []byte(docID.String()), + Cid: c.Bytes(), + SchemaRoot: []byte(schemaRoot), + Creator: p.host.ID().String(), + Log: &pb.Document_Log{ + Block: rawBlock, + }, + }, } return p.server.publishLog(p.ctx, schemaRoot, req) @@ -318,7 +313,7 @@ func (p *Peer) pushToReplicator( txn.Headstore(), docID.WithFieldId(core.COMPOSITE_NAMESPACE).ToHeadStoreKey(), ) - cids, priority, err := headset.List(ctx) + cids, _, err := headset.List(ctx) if err != nil { log.ErrorContextE( ctx, @@ -340,19 +335,11 @@ func (p *Peer) pushToReplicator( continue } - // @todo: remove encode/decode loop for core.Log data - nd, 
err := dag.DecodeProtobuf(blk.RawData()) - if err != nil { - log.ErrorContextE(ctx, "Failed to decode protobuf", err, corelog.Any("CID", c)) - continue - } - evt := events.Update{ DocID: docIDResult.ID.String(), Cid: c, SchemaRoot: collection.SchemaRoot(), - Block: nd, - Priority: priority, + Block: blk.RawData(), } if err := p.server.pushLog(ctx, evt, pid); err != nil { log.ErrorContextE( @@ -445,7 +432,7 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { SchemaRoot: []byte(evt.SchemaRoot), Creator: p.host.ID().String(), Log: &pb.Document_Log{ - Block: evt.Block.RawData(), + Block: evt.Block, }, } req := &pb.PushLogRequest{ @@ -510,20 +497,14 @@ func (p *Peer) setupBlockService() { } func (p *Peer) setupDAGService() { - p.DAGService = dag.NewDAGService(p.bserv) + p.FetcherConfig = dagsyncerbs.NewFetcherConfig(p.bserv) } -func (p *Peer) newDAGSyncerTxn(txn datastore.Txn) ipld.DAGService { - return dag.NewDAGService(blockservice.New(txn.DAGstore(), p.exch)) -} - -// Session returns a session-based NodeGetter. 
-func (p *Peer) Session(ctx context.Context) ipld.NodeGetter { - ng := dag.NewSession(ctx, p.DAGService) - if ng == p.DAGService { - log.InfoContext(ctx, "DAGService does not support sessions") - } - return ng +func (p *Peer) newDAGSyncerTxn(txn datastore.Txn) dagsyncer.Fetcher { + return p.FetcherWithSession( + p.ctx, + blockservice.NewSession(p.ctx, blockservice.New(txn.DAGstore(), p.exch)), + ) } func stopGRPCServer(ctx context.Context, server *grpc.Server) { diff --git a/net/peer_test.go b/net/peer_test.go index e64107d888..248f665073 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -18,7 +18,6 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" - ipld "github.com/ipfs/go-ipld-format" libp2p "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" @@ -28,74 +27,27 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/core/crdt" "github.com/sourcenetwork/defradb/internal/db" netutils "github.com/sourcenetwork/defradb/net/utils" ) -type EmptyNode struct{} - -var ErrEmptyNode error = errors.New("dummy node") - -func (n *EmptyNode) Resolve([]string) (any, []string, error) { - return nil, nil, ErrEmptyNode -} - -func (n *EmptyNode) Tree(string, int) []string { - return nil -} - -func (n *EmptyNode) ResolveLink([]string) (*ipld.Link, []string, error) { - return nil, nil, ErrEmptyNode -} - -func (n *EmptyNode) Copy() ipld.Node { - return &EmptyNode{} -} - -func (n *EmptyNode) Cid() cid.Cid { - id, err := cid.V1Builder{ - Codec: cid.DagProtobuf, - MhType: mh.SHA2_256, - MhLength: 0, // default length - }.Sum(nil) - - if err != nil { - panic("failed to create an empty cid!") 
+func emptyBlock() []byte { + block := coreblock.Block{ + Delta: crdt.CRDT{ + CompositeDAGDelta: &crdt.CompositeDAGDelta{}, + }, } - return id -} - -func (n *EmptyNode) Links() []*ipld.Link { - return nil -} - -func (n *EmptyNode) Loggable() map[string]any { - return nil -} - -func (n *EmptyNode) String() string { - return "[]" -} - -func (n *EmptyNode) RawData() []byte { - return nil -} - -func (n *EmptyNode) Size() (uint64, error) { - return 0, nil -} - -func (n *EmptyNode) Stat() (*ipld.NodeStat, error) { - return &ipld.NodeStat{}, nil + b, _ := block.Marshal() + return b } func createCID(doc *client.Document) (cid.Cid, error) { pref := cid.V1Builder{ - Codec: cid.DagProtobuf, + Codec: cid.DagCBOR, MhType: mh.SHA2_256, MhLength: 0, // default length } @@ -339,7 +291,7 @@ func TestRegisterNewDocument_NoError(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - err = n.RegisterNewDocument(ctx, doc.ID(), cid, &EmptyNode{}, col.SchemaRoot()) + err = n.RegisterNewDocument(ctx, doc.ID(), cid, emptyBlock(), col.SchemaRoot()) require.NoError(t, err) } @@ -366,7 +318,7 @@ func TestRegisterNewDocument_RPCTopicAlreadyRegisteredError(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - err = n.RegisterNewDocument(ctx, doc.ID(), cid, &EmptyNode{}, col.SchemaRoot()) + err = n.RegisterNewDocument(ctx, doc.ID(), cid, emptyBlock(), col.SchemaRoot()) require.Equal(t, err.Error(), "creating topic: joining topic: topic already exists") } @@ -946,24 +898,17 @@ func TestHandleDocCreateLog_NoError(t *testing.T) { err = col.Create(ctx, doc) require.NoError(t, err) - docCid, err := createCID(doc) + headCID, err := getHead(ctx, db, doc.ID()) require.NoError(t, err) - delta := &crdt.CompositeDAGDelta{ - SchemaVersionID: col.Schema().VersionID, - Priority: 1, - DocID: doc.ID().Bytes(), - } - - node, err := makeNode(delta, []cid.Cid{docCid}) + b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) err = 
n.handleDocCreateLog(events.Update{ DocID: doc.ID().String(), - Cid: docCid, + Cid: headCID, SchemaRoot: col.SchemaRoot(), - Block: node, - Priority: 0, + Block: b, }) require.NoError(t, err) } @@ -1029,24 +974,17 @@ func TestHandleDocUpdateLog_NoError(t *testing.T) { err = col.Create(ctx, doc) require.NoError(t, err) - docCid, err := createCID(doc) + headCID, err := getHead(ctx, db, doc.ID()) require.NoError(t, err) - delta := &crdt.CompositeDAGDelta{ - SchemaVersionID: col.Schema().VersionID, - Priority: 1, - DocID: doc.ID().Bytes(), - } - - node, err := makeNode(delta, []cid.Cid{docCid}) + b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) err = n.handleDocUpdateLog(events.Update{ DocID: doc.ID().String(), - Cid: docCid, + Cid: headCID, SchemaRoot: col.SchemaRoot(), - Block: node, - Priority: 0, + Block: b, }) require.NoError(t, err) } @@ -1082,16 +1020,10 @@ func TestHandleDocUpdateLog_WithExistingDocIDTopic_TopicExistsError(t *testing.T err = col.Create(ctx, doc) require.NoError(t, err) - docCid, err := createCID(doc) + headCID, err := getHead(ctx, db, doc.ID()) require.NoError(t, err) - delta := &crdt.CompositeDAGDelta{ - SchemaVersionID: col.Schema().VersionID, - Priority: 1, - DocID: doc.ID().Bytes(), - } - - node, err := makeNode(delta, []cid.Cid{docCid}) + b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.ID().String(), true) @@ -1099,9 +1031,9 @@ func TestHandleDocUpdateLog_WithExistingDocIDTopic_TopicExistsError(t *testing.T err = n.handleDocUpdateLog(events.Update{ DocID: doc.ID().String(), - Cid: docCid, + Cid: headCID, SchemaRoot: col.SchemaRoot(), - Block: node, + Block: b, }) require.ErrorContains(t, err, "topic already exists") } @@ -1126,16 +1058,10 @@ func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing. 
err = col.Create(ctx, doc) require.NoError(t, err) - docCid, err := createCID(doc) + headCID, err := getHead(ctx, db, doc.ID()) require.NoError(t, err) - delta := &crdt.CompositeDAGDelta{ - SchemaVersionID: col.Schema().VersionID, - Priority: 1, - DocID: doc.ID().Bytes(), - } - - node, err := makeNode(delta, []cid.Cid{docCid}) + b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), col.SchemaRoot(), true) @@ -1143,17 +1069,9 @@ func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing. err = n.handleDocUpdateLog(events.Update{ DocID: doc.ID().String(), - Cid: docCid, + Cid: headCID, SchemaRoot: col.SchemaRoot(), - Block: node, + Block: b, }) require.ErrorContains(t, err, "topic already exists") } - -func TestSession_NoError(t *testing.T) { - ctx := context.Background() - _, n := newTestNode(ctx, t) - defer n.Close() - ng := n.Session(ctx) - require.Implements(t, (*ipld.NodeGetter)(nil), ng) -} diff --git a/net/process.go b/net/process.go index b02e2e1fed..882c29c360 100644 --- a/net/process.go +++ b/net/process.go @@ -18,26 +18,25 @@ import ( "fmt" "sync" - dag "github.com/ipfs/boxo/ipld/merkledag" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" + dagsyncer "github.com/ipfs/boxo/fetcher" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" ) type blockProcessor struct { *Peer - txn datastore.Txn - col client.Collection - dsKey core.DataStoreKey - getter ipld.NodeGetter + txn datastore.Txn + col 
client.Collection + dsKey core.DataStoreKey + dagSyncer dagsyncer.Fetcher // List of composite blocks to eventually merge composites *list.List } @@ -47,7 +46,7 @@ func newBlockProcessor( txn datastore.Txn, col client.Collection, dsKey core.DataStoreKey, - getter ipld.NodeGetter, + dagSyncer dagsyncer.Fetcher, ) *blockProcessor { return &blockProcessor{ Peer: p, @@ -55,64 +54,67 @@ func newBlockProcessor( txn: txn, col: col, dsKey: dsKey, - getter: getter, + dagSyncer: dagSyncer, } } // mergeBlock runs trough the list of composite blocks and sends them for processing. func (bp *blockProcessor) mergeBlocks(ctx context.Context) { for e := bp.composites.Front(); e != nil; e = e.Next() { - nd := e.Value.(ipld.Node) - err := bp.processBlock(ctx, nd, "") + block := e.Value.(*coreblock.Block) + link, _ := block.GenerateLink() + err := bp.processBlock(ctx, block, link, "") if err != nil { log.ErrorContextE( ctx, "Failed to process block", err, corelog.String("DocID", bp.dsKey.DocID), - corelog.Any("CID", nd.Cid()), + corelog.Any("CID", link.Cid), ) } } } // processBlock merges the block and its children to the datastore and sets the head accordingly. 
-func (bp *blockProcessor) processBlock(ctx context.Context, nd ipld.Node, field string) error { +func (bp *blockProcessor) processBlock( + ctx context.Context, + block *coreblock.Block, + blockLink cidlink.Link, + field string, +) error { crdt, err := initCRDTForType(bp.txn, bp.col, bp.dsKey, field) if err != nil { return err } - delta, err := crdt.DeltaDecode(nd) - if err != nil { - return errors.Wrap("failed to decode delta object", err) - } - err = crdt.Clock().ProcessNode(ctx, delta, nd) + err = crdt.Clock().ProcessBlock(ctx, block, blockLink) if err != nil { return err } - for _, link := range nd.Links() { + for _, link := range block.Links { if link.Name == core.HEAD { continue } - block, err := bp.txn.DAGstore().Get(ctx, link.Cid) + b, err := bp.txn.DAGstore().Get(ctx, link.Cid) if err != nil { return err } - nd, err := dag.DecodeProtobufBlock(block) + + childBlock, err := coreblock.GetFromBytes(b.RawData()) if err != nil { return err } - if err := bp.processBlock(ctx, nd, link.Name); err != nil { + if err := bp.processBlock(ctx, childBlock, link.Link, link.Name); err != nil { log.ErrorContextE( ctx, "Failed to process block", err, corelog.String("DocID", bp.dsKey.DocID), - corelog.Any("CID", nd.Cid()), + corelog.Any("CID", link.Cid), ) } } @@ -164,30 +166,27 @@ func initCRDTForType( ) } -func decodeBlockBuffer(buf []byte, cid cid.Cid) (ipld.Node, error) { - blk, err := blocks.NewBlockWithCid(buf, cid) - if err != nil { - return nil, errors.Wrap("failed to create block", err) - } - return ipld.Decode(blk, dag.DecodeProtobufBlock) -} - // processRemoteBlock stores the block in the DAG store and initiates a sync of the block's children. 
func (bp *blockProcessor) processRemoteBlock( ctx context.Context, session *sync.WaitGroup, - nd ipld.Node, - isComposite bool, + block *coreblock.Block, ) error { - if err := bp.txn.DAGstore().Put(ctx, nd); err != nil { + link, err := block.GenerateLink() + if err != nil { + return err + } + + b, err := block.Marshal() + if err != nil { return err } - if isComposite { - bp.composites.PushFront(nd) + if err := bp.txn.DAGstore().AsIPLDStorage().Put(ctx, link.Binary(), b); err != nil { + return err } - bp.handleChildBlocks(ctx, session, nd, isComposite) + bp.handleChildBlocks(ctx, session, block) return nil } @@ -195,17 +194,20 @@ func (bp *blockProcessor) processRemoteBlock( func (bp *blockProcessor) handleChildBlocks( ctx context.Context, session *sync.WaitGroup, - nd ipld.Node, - isComposite bool, + block *coreblock.Block, ) { - if len(nd.Links()) == 0 { + if block.Delta.IsComposite() { + bp.composites.PushFront(block) + } + + if len(block.Links) == 0 { return } ctx, cancel := context.WithTimeout(ctx, DAGSyncTimeout) defer cancel() - for _, link := range nd.Links() { + for _, link := range block.Links { if !bp.queuedChildren.Visit(link.Cid) { // reserve for processing continue } @@ -225,10 +227,9 @@ func (bp *blockProcessor) handleChildBlocks( session.Add(1) job := &dagJob{ - session: session, - cid: link.Cid, - isComposite: isComposite && link.Name == core.HEAD, - bp: bp, + session: session, + cid: link.Cid, + bp: bp, } select { diff --git a/net/server.go b/net/server.go index 9e5c213200..1cd9910856 100644 --- a/net/server.go +++ b/net/server.go @@ -18,7 +18,6 @@ import ( "sync" "github.com/ipfs/go-cid" - format "github.com/ipfs/go-ipld-format" "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/corelog" @@ -33,6 +32,7 @@ import ( "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/core" + coreblock 
"github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db" pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -262,20 +262,17 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL } // Create a new DAG service with the current transaction - var getter format.NodeGetter = s.peer.newDAGSyncerTxn(txn) - if sessionMaker, ok := getter.(SessionDAGSyncer); ok { - getter = sessionMaker.Session(ctx) - } + dagSyncer := s.peer.newDAGSyncerTxn(txn) // handleComposite - nd, err := decodeBlockBuffer(req.Body.Log.Block, cid) + block, err := coreblock.GetFromBytes(req.Body.Log.Block) if err != nil { - return nil, errors.Wrap("failed to decode block to ipld.Node", err) + return nil, errors.Wrap("failed to decode block", err) } var wg sync.WaitGroup - bp := newBlockProcessor(s.peer, txn, col, dsKey, getter) - err = bp.processRemoteBlock(ctx, &wg, nd, true) + bp := newBlockProcessor(s.peer, txn, col, dsKey, dagSyncer) + err = bp.processRemoteBlock(ctx, &wg, block) if err != nil { log.ErrorContextE( ctx, diff --git a/net/server_test.go b/net/server_test.go index 916e234109..93d7d8130f 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore/query" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" @@ -24,6 +26,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/internal/core" net_pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -247,6 +250,27 @@ func TestDocQueue(t *testing.T) { q.mu.Unlock() } +func getHead(ctx context.Context, db client.DB, docID client.DocID) (cid.Cid, error) { + prefix := core.DataStoreKeyFromDocID(docID).ToHeadStoreKey().WithFieldId(core.COMPOSITE_NAMESPACE).ToString() + 
results, err := db.Headstore().Query(ctx, query.Query{Prefix: prefix}) + if err != nil { + return cid.Undef, err + } + entries, err := results.Rest() + if err != nil { + return cid.Undef, err + } + + if len(entries) > 0 { + hsKey, err := core.NewHeadStoreKey(entries[0].Key) + if err != nil { + return cid.Undef, err + } + return hsKey.Cid, nil + } + return cid.Undef, errors.New("no head found") +} + func TestPushLog(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) @@ -265,23 +289,27 @@ func TestPushLog(t *testing.T) { doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col.Definition()) require.NoError(t, err) - cid, err := createCID(doc) - require.NoError(t, err) - ctx = grpcpeer.NewContext(ctx, &grpcpeer.Peer{ Addr: addr{n.PeerID()}, }) - block := &EmptyNode{} + err = col.Create(ctx, doc) + require.NoError(t, err) + + headCID, err := getHead(ctx, db, doc.ID()) + require.NoError(t, err) + + b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) + require.NoError(t, err) _, err = n.server.PushLog(ctx, &net_pb.PushLogRequest{ Body: &net_pb.PushLogRequest_Body{ DocID: []byte(doc.ID().String()), - Cid: cid.Bytes(), + Cid: headCID.Bytes(), SchemaRoot: []byte(col.SchemaRoot()), Creator: n.PeerID().String(), Log: &net_pb.Document_Log{ - Block: block.RawData(), + Block: b, }, }, }) diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index b0dddff9cd..18e560b16c 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -20,7 +20,7 @@ import ( "strconv" "strings" - blockstore "github.com/ipfs/boxo/blockstore" + ds "github.com/ipfs/go-datastore" "github.com/lens-vm/lens/host-go/config/model" "github.com/libp2p/go-libp2p/core/peer" @@ -512,10 +512,14 @@ func (w *Wrapper) Root() datastore.RootStore { return w.node.Root() } -func (w *Wrapper) Blockstore() blockstore.Blockstore { +func (w *Wrapper) Blockstore() datastore.DAGStore { return w.node.Blockstore() } +func (w 
*Wrapper) Headstore() ds.Read { + return w.node.Headstore() +} + func (w *Wrapper) Peerstore() datastore.DSBatching { return w.node.Peerstore() } diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go index 51911c3321..4727542cce 100644 --- a/tests/clients/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -14,7 +14,7 @@ import ( "context" "net/http/httptest" - blockstore "github.com/ipfs/boxo/blockstore" + ds "github.com/ipfs/go-datastore" "github.com/lens-vm/lens/host-go/config/model" "github.com/libp2p/go-libp2p/core/peer" @@ -203,10 +203,14 @@ func (w *Wrapper) Root() datastore.RootStore { return w.node.Root() } -func (w *Wrapper) Blockstore() blockstore.Blockstore { +func (w *Wrapper) Blockstore() datastore.DAGStore { return w.node.Blockstore() } +func (w *Wrapper) Headstore() ds.Read { + return w.node.Headstore() +} + func (w *Wrapper) Peerstore() datastore.DSBatching { return w.node.Peerstore() } diff --git a/tests/integration/events/simple/with_update_test.go b/tests/integration/events/simple/with_update_test.go index d224690827..689734dc73 100644 --- a/tests/integration/events/simple/with_update_test.go +++ b/tests/integration/events/simple/with_update_test.go @@ -66,14 +66,14 @@ func TestEventsSimpleWithUpdate(t *testing.T) { ExpectedUpdates: []testUtils.ExpectedUpdate{ { DocID: immutable.Some(docID1), - Cid: immutable.Some("bafybeif757a4mdwimqwl24ujjnao6xlajiajz2hwuleopnptusuttri6zu"), + Cid: immutable.Some("bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu"), }, { DocID: immutable.Some(docID2), }, { DocID: immutable.Some(docID1), - Cid: immutable.Some("bafybeifhmjw6ay5rvwznqh37ogcw5hrmqtxrnredoh6psn7lhgtdc253km"), + Cid: immutable.Some("bafyreihfijpchdbc6fb3klay3a2ktcwav7mse6udbxpauslwzsmn6qczna"), }, }, } diff --git a/tests/integration/events/utils.go b/tests/integration/events/utils.go index 6a8f54aac2..d19a96a052 100644 --- a/tests/integration/events/utils.go +++ b/tests/integration/events/utils.go @@ -58,7 +58,7 @@ 
type ExpectedUpdate struct { // The expected Cid, as a string (results in much more readable errors) Cid immutable.Option[string] SchemaRoot immutable.Option[string] - Priority immutable.Option[uint64] + IsCreate immutable.Option[bool] } const eventTimeout = 100 * time.Millisecond @@ -97,7 +97,7 @@ func ExecuteRequestTestCase( expectedEvent := testCase.ExpectedUpdates[indexOfNextExpectedUpdate] assertIfExpected(t, expectedEvent.Cid, update.Cid.String()) assertIfExpected(t, expectedEvent.DocID, update.DocID) - assertIfExpected(t, expectedEvent.Priority, update.Priority) + assertIfExpected(t, expectedEvent.IsCreate, update.IsCreate) assertIfExpected(t, expectedEvent.SchemaRoot, update.SchemaRoot) indexOfNextExpectedUpdate++ diff --git a/tests/integration/mutation/create/with_version_test.go b/tests/integration/mutation/create/with_version_test.go index 943916f1ed..69f6d2b9f3 100644 --- a/tests/integration/mutation/create/with_version_test.go +++ b/tests/integration/mutation/create/with_version_test.go @@ -39,7 +39,7 @@ func TestMutationCreate_ReturnsVersionCID(t *testing.T) { { "_version": []map[string]any{ { - "cid": "bafybeif757a4mdwimqwl24ujjnao6xlajiajz2hwuleopnptusuttri6zu", + "cid": "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", }, }, }, diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go index b90d5d0ea4..13f8307840 100644 --- a/tests/integration/query/commits/simple_test.go +++ b/tests/integration/query/commits/simple_test.go @@ -36,13 +36,13 @@ func TestQueryCommits(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": 
"bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, @@ -79,22 +79,22 @@ func TestQueryCommitsMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihnalsemihsyycy3vaxbhq6iqrixmsk5k3idq52u76h2f5wkvobx4", + "cid": "bafyreidoznu3dbvgngnfk3xjjgpmrpjs3wrpbc7ntl3rbacspeee5kp53a", }, { - "cid": "bafybeifxk5rhzuemqn2o35hh7346gydqlfmhkdzeguiqo5vczgyz4xz7rm", + "cid": "bafyreia5q5oya6vnv2kvffzfnl23762zvtqxmatjd5s3ldpwxfdo3aey6i", }, { - "cid": "bafybeig36zwhejk54nvvey5wsfbl7rzm7xscsyji5uqp6j4hw4zh7dhep4", + "cid": "bafyreibr7mv7b4kg4zym7fow6ljhl6kdzfgcvfi5q26ogz5gsev75ewxcq", }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, @@ -125,15 +125,15 @@ func TestQueryCommitsWithSchemaVersionIdField(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, @@ -349,7 +349,7 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { `, Results: 
[]map[string]any{ { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "collectionID": int64(1), "delta": testUtils.CBORValue(22), "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", @@ -358,13 +358,13 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "height": int64(2), "links": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "name": "_head", }, }, }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "collectionID": int64(1), "delta": testUtils.CBORValue(21), "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", @@ -374,7 +374,7 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "links": []map[string]any{}, }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "collectionID": int64(1), "delta": testUtils.CBORValue("John"), "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", @@ -384,7 +384,7 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "links": []map[string]any{}, }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "collectionID": int64(1), "delta": nil, "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", @@ -393,17 +393,17 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "height": int64(2), "links": []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "name": "_head", }, { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": 
"bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "name": "age", }, }, }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "collectionID": int64(1), "delta": nil, "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", @@ -412,12 +412,12 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "height": int64(1), "links": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", - "name": "age", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "name": "name", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", - "name": "name", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "name": "age", }, }, }, diff --git a/tests/integration/query/commits/with_cid_test.go b/tests/integration/query/commits/with_cid_test.go index 22f2caa5c2..0c502ea7e2 100644 --- a/tests/integration/query/commits/with_cid_test.go +++ b/tests/integration/query/commits/with_cid_test.go @@ -38,14 +38,14 @@ func TestQueryCommitsWithCid(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq" + cid: "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, @@ -71,14 +71,14 @@ func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq" + cid: "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, 
}, diff --git a/tests/integration/query/commits/with_depth_test.go b/tests/integration/query/commits/with_depth_test.go index 3475985174..a950e72b85 100644 --- a/tests/integration/query/commits/with_depth_test.go +++ b/tests/integration/query/commits/with_depth_test.go @@ -36,13 +36,13 @@ func TestQueryCommitsWithDepth1(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, @@ -81,16 +81,16 @@ func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { Results: []map[string]any{ { // "Age" field head - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "height": int64(1), }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, }, @@ -137,27 +137,27 @@ func TestQueryCommitsWithDepth2WithUpdate(t *testing.T) { Results: []map[string]any{ { // Composite head - "cid": "bafybeigzaxekosbmrfrzjhkztodipzmz3voiqnia275347b6vkq5keouf4", + "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", "height": int64(3), }, { // Composite head -1 - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "height": int64(2), }, { // "Name" 
field head (unchanged from create) - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "height": int64(1), }, { // "Age" field head - "cid": "bafybeifwa5vgfvnrdwzqmojsxilwbg2k37axh2fs57zfmddz3l5yivn4la", + "cid": "bafyreifyknutg2lsajcsqrfegr65t5h5s743jkp3bfuzphx4nmqztfwmga", "height": int64(3), }, { // "Age" field head -1 - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, }, @@ -195,22 +195,22 @@ func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeicacj5fmr267b6kkmv4ck3g5cm5odca7hu7ajwagfttpspbsu7n5u", + "cid": "bafyreicab3zaizu5bwyfn25hy5zf6hsdx6un5kyixiktxkgwtyobdlr52e", }, { - "cid": "bafybeiexu7xpwhyo2azo2ap2nbny5d4chhr725xrhmxnt5ebabucyjlfqu", + "cid": "bafyreig3kdxtlbaohkcxx5bysmyrjvdggoeq47x6cyrbyfevi2hbgkq4sa", }, { - "cid": "bafybeibbp6jn7y2t6jakbdtvboruieo3iobyuumppbwbw7rwkmz4tdh5yq", + "cid": "bafyreifq22zqplhdxr2rvuanegqw6ogaur46f5ud3upp7p2dw7tt6vozpa", }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_cid_test.go b/tests/integration/query/commits/with_doc_id_cid_test.go index 9f61805048..9e1972e391 100644 --- a/tests/integration/query/commits/with_doc_id_cid_test.go +++ b/tests/integration/query/commits/with_doc_id_cid_test.go @@ -104,14 +104,14 @@ func TestQueryCommitsWithDocIDAndCidWithUpdate(t *testing.T) { Request: ` { commits( 
docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", - cid: "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm" + cid: "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_count_test.go b/tests/integration/query/commits/with_doc_id_count_test.go index 89ba666163..ef4dc103bf 100644 --- a/tests/integration/query/commits/with_doc_id_count_test.go +++ b/tests/integration/query/commits/with_doc_id_count_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDocIDAndLinkCount(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "_count": 0, }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "_count": 0, }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "_count": 2, }, }, diff --git a/tests/integration/query/commits/with_doc_id_field_test.go b/tests/integration/query/commits/with_doc_id_field_test.go index 65fb4a5637..4d0e838be7 100644 --- a/tests/integration/query/commits/with_doc_id_field_test.go +++ b/tests/integration/query/commits/with_doc_id_field_test.go @@ -118,7 +118,7 @@ func TestQueryCommitsWithDocIDAndFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, }, }, @@ -150,7 +150,7 @@ func TestQueryCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": 
"bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_limit_offset_test.go index 47b21aaf08..fc2a51da20 100644 --- a/tests/integration/query/commits/with_doc_id_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_offset_test.go @@ -57,10 +57,10 @@ func TestQueryCommitsWithDocIDAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeigzaxekosbmrfrzjhkztodipzmz3voiqnia275347b6vkq5keouf4", + "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", }, { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_test.go b/tests/integration/query/commits/with_doc_id_limit_test.go index 938ce72ea9..78bdc100c4 100644 --- a/tests/integration/query/commits/with_doc_id_limit_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_test.go @@ -50,10 +50,10 @@ func TestQueryCommitsWithDocIDAndLimit(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeigzaxekosbmrfrzjhkztodipzmz3voiqnia275347b6vkq5keouf4", + "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", }, { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go index 058825acca..cc2663ba1b 100644 --- a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go @@ -58,11 +58,11 @@ func 
TestQueryCommitsWithDocIDAndOrderAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, { - "cid": "bafybeigzaxekosbmrfrzjhkztodipzmz3voiqnia275347b6vkq5keouf4", + "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", "height": int64(3), }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_test.go b/tests/integration/query/commits/with_doc_id_order_test.go index 70ab643688..5de3f114b6 100644 --- a/tests/integration/query/commits/with_doc_id_order_test.go +++ b/tests/integration/query/commits/with_doc_id_order_test.go @@ -44,23 +44,23 @@ func TestQueryCommitsWithDocIDAndOrderHeightDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "height": int64(2), }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "height": int64(1), }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "height": int64(1), }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "height": int64(1), }, }, @@ -99,23 +99,23 @@ func TestQueryCommitsWithDocIDAndOrderHeightAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "height": int64(1), }, { - "cid": 
"bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "height": int64(1), }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "height": int64(1), }, { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "height": int64(2), }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, }, @@ -154,25 +154,25 @@ func TestQueryCommitsWithDocIDAndOrderCidDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "height": int64(1), }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", - "height": int64(1), - }, - { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", - "height": int64(2), + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "height": int64(1), }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "height": int64(1), }, + { + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "height": int64(2), + }, }, }, }, @@ -209,23 +209,23 @@ func TestQueryCommitsWithDocIDAndOrderCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", - "height": int64(1), - }, - { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": 
"bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "height": int64(2), }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", - "height": int64(2), + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "height": int64(1), }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "height": int64(1), }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "height": int64(2), + }, + { + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "height": int64(1), }, }, @@ -278,39 +278,39 @@ func TestQueryCommitsWithDocIDAndOrderAndMultiUpdatesCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "height": int64(1), }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "height": int64(1), }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "height": int64(1), }, { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "height": int64(2), }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, { - "cid": "bafybeigzaxekosbmrfrzjhkztodipzmz3voiqnia275347b6vkq5keouf4", + "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", "height": int64(3), }, { - "cid": "bafybeifwa5vgfvnrdwzqmojsxilwbg2k37axh2fs57zfmddz3l5yivn4la", + "cid": 
"bafyreifyknutg2lsajcsqrfegr65t5h5s743jkp3bfuzphx4nmqztfwmga", "height": int64(3), }, { - "cid": "bafybeifn2f5lgzall3dzva47khbtib77lt7ve5qyclou3ihi2hy2uqj4nm", + "cid": "bafyreiarqxac3gnfrfj3j6fukof375kp56i6jcvgmpfytenczsilx4xkey", "height": int64(4), }, { - "cid": "bafybeieijpm36ntafrncl4kgx6dkxgpbftcl4f7obbbmagurcgdoj6sl5y", + "cid": "bafyreihazmfdrwrk3udgnnkuxipcg7cjmzndfazif3t2vxyrq36qwfkrt4", "height": int64(4), }, }, diff --git a/tests/integration/query/commits/with_doc_id_test.go b/tests/integration/query/commits/with_doc_id_test.go index 1524409663..800dad5bfd 100644 --- a/tests/integration/query/commits/with_doc_id_test.go +++ b/tests/integration/query/commits/with_doc_id_test.go @@ -62,13 +62,13 @@ func TestQueryCommitsWithDocID(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, @@ -102,23 +102,23 @@ func TestQueryCommitsWithDocIDAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "links": []map[string]any{}, }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "links": []map[string]any{}, }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "links": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", - "name": "age", + "cid": 
"bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "name": "name", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", - "name": "name", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "name": "age", }, }, }, @@ -158,23 +158,23 @@ func TestQueryCommitsWithDocIDAndUpdate(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "height": int64(2), }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "height": int64(1), }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "height": int64(1), }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "height": int64(2), }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "height": int64(1), }, }, @@ -219,45 +219,45 @@ func TestQueryCommitsWithDocIDAndUpdateAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "links": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "name": "_head", }, }, }, { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "links": []map[string]any{}, }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": 
"bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "links": []map[string]any{}, }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", "links": []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "name": "_head", }, { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", "name": "age", }, }, }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "links": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", - "name": "age", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "name": "name", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", - "name": "name", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "name": "age", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_typename_test.go b/tests/integration/query/commits/with_doc_id_typename_test.go index 51bc88a946..d15ebba47e 100644 --- a/tests/integration/query/commits/with_doc_id_typename_test.go +++ b/tests/integration/query/commits/with_doc_id_typename_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDocIDWithTypeName(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "__typename": "Commit", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "__typename": "Commit", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": 
"bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "__typename": "Commit", }, }, diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index 1ea35a8d96..fa1886304b 100644 --- a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -66,7 +66,7 @@ func TestQueryCommitsWithFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, }, }, @@ -98,7 +98,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, @@ -131,7 +131,7 @@ func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionId(t *testing. }`, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, diff --git a/tests/integration/query/commits/with_group_test.go b/tests/integration/query/commits/with_group_test.go index 1971e6f6dd..f69476a648 100644 --- a/tests/integration/query/commits/with_group_test.go +++ b/tests/integration/query/commits/with_group_test.go @@ -89,10 +89,10 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(2), "_group": []map[string]any{ { - "cid": "bafybeiaho26jaxdjfuvyxozws6ushksjwidllvgai6kgxmqxhzylwzkvte", + "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", }, { - "cid": "bafybeiep7c6ouykgidnwzjeasyim3ost5qjkro4qvs62t4u4u7rolbmugm", + "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", }, }, }, @@ -100,13 +100,13 @@ func 
TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(1), "_group": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", }, }, }, @@ -142,7 +142,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "_group": []map[string]any{ { "height": int64(1), @@ -150,7 +150,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", "_group": []map[string]any{ { "height": int64(1), @@ -158,7 +158,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "_group": []map[string]any{ { "height": int64(1), diff --git a/tests/integration/query/latest_commits/with_doc_id_field_test.go b/tests/integration/query/latest_commits/with_doc_id_field_test.go index 0b886b966a..624f318c27 100644 --- a/tests/integration/query/latest_commits/with_doc_id_field_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_field_test.go @@ -68,7 +68,7 @@ func TestQueryLatestCommitsWithDocIDAndFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", + "cid": 
"bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", "links": []map[string]any{}, }, }, @@ -101,15 +101,15 @@ func TestQueryLatestCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "links": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", - "name": "age", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "name": "name", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", - "name": "name", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "name": "age", }, }, }, diff --git a/tests/integration/query/latest_commits/with_doc_id_test.go b/tests/integration/query/latest_commits/with_doc_id_test.go index 089f6f5086..726c009cf7 100644 --- a/tests/integration/query/latest_commits/with_doc_id_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_test.go @@ -38,15 +38,15 @@ func TestQueryLatestCommitsWithDocID(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", "links": []map[string]any{ { - "cid": "bafybeietdefm4jtgcbpof5elqdptwnuevvzujb3j22n6dytx67vpmb3xn4", - "name": "age", + "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "name": "name", }, { - "cid": "bafybeihc26puzzgnctvgvgowihytif52tmdj3y2ksx6tvge2wtjkjvtszi", - "name": "name", + "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "name": "age", }, }, }, @@ -75,7 +75,7 @@ func TestQueryLatestCommitsWithDocIDWithSchemaVersionIdField(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeiafufeqwjo5eeeaobwiu6ibf73dnvlefr47mrx45z42337mfnezkq", + "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", 
"schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, diff --git a/tests/integration/query/one_to_many/with_cid_doc_id_test.go b/tests/integration/query/one_to_many/with_cid_doc_id_test.go index 6b896ca6ed..a097e6fe29 100644 --- a/tests/integration/query/one_to_many/with_cid_doc_id_test.go +++ b/tests/integration/query/one_to_many/with_cid_doc_id_test.go @@ -104,7 +104,7 @@ func TestQueryOneToManyWithCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafybeia3qbhebdwssoe5udinpbdj4pntb5wjr77ql7ptzq32howbaxz2cu" + cid: "bafyreicj6hg76f5hveo5ykaw6kmldtujbmamzyasje6a3gxrro7nlplhba" docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -179,7 +179,7 @@ func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafybeia3qbhebdwssoe5udinpbdj4pntb5wjr77ql7ptzq32howbaxz2cu", + cid: "bafyreicj6hg76f5hveo5ykaw6kmldtujbmamzyasje6a3gxrro7nlplhba", docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -252,7 +252,7 @@ func TestQueryOneToManyWithParentUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafybeia3qbhebdwssoe5udinpbdj4pntb5wjr77ql7ptzq32howbaxz2cu", + cid: "bafyreicj6hg76f5hveo5ykaw6kmldtujbmamzyasje6a3gxrro7nlplhba", docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -325,7 +325,7 @@ func TestQueryOneToManyWithParentUpdateAndLastCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafybeibqkdnc63xh5k4frs3x3k7z7p6sw4usjrhxd4iusbjj2uhxfjfjcq", + cid: "bafyreibom3twkrprkfljn4hh6hyenpzofdwhl2qfrnfa4eljikpyexnn2q", docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name diff --git a/tests/integration/query/simple/with_cid_doc_id_test.go b/tests/integration/query/simple/with_cid_doc_id_test.go index 6fe41d1aae..28bd453faf 100644 --- a/tests/integration/query/simple/with_cid_doc_id_test.go +++ 
b/tests/integration/query/simple/with_cid_doc_id_test.go @@ -93,7 +93,7 @@ func TestQuerySimpleWithCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeif757a4mdwimqwl24ujjnao6xlajiajz2hwuleopnptusuttri6zu", + cid: "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" ) { name @@ -135,7 +135,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeif757a4mdwimqwl24ujjnao6xlajiajz2hwuleopnptusuttri6zu", + cid: "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" ) { name @@ -177,7 +177,7 @@ func TestQuerySimpleWithUpdateAndLastCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeibwxvtvppws6sjfoajazevrdh27g4qwn5wguslpabyl3kzxd2a6fm", + cid: "bafyreihyutx64yvxcfeglc2kax3l4kxrp5mae4p7txrxyiszqybs54h3zq", docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" ) { name @@ -224,7 +224,7 @@ func TestQuerySimpleWithUpdateAndMiddleCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeibwxvtvppws6sjfoajazevrdh27g4qwn5wguslpabyl3kzxd2a6fm", + cid: "bafyreihyutx64yvxcfeglc2kax3l4kxrp5mae4p7txrxyiszqybs54h3zq", docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" ) { name @@ -266,7 +266,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocIDAndSchemaVersion(t *testing.T) testUtils.Request{ Request: `query { Users ( - cid: "bafybeif757a4mdwimqwl24ujjnao6xlajiajz2hwuleopnptusuttri6zu", + cid: "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" ) { name @@ -324,7 +324,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeicruxxfhxhyvefbxid7gukdbnfzkyad45phu4mnwzzqde24p32xnu", + cid: "bafyreifgpewgsde3mpq5upokcngfcehdujokqultxlez3cnskhxfde3dw4", docID: 
"bae-a688789e-d8a6-57a7-be09-22e005ab79e0" ) { name @@ -376,7 +376,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeibeo7pmvzpkkanwd72q4qu3m4yxex3coufq7uogvcnjwgqzrlpco4", + cid: "bafyreic7ds5alv2e7hxdbz3st5tj3vt4rvwydz6sap4wraudl3kzy6y27e", docID: "bae-fa6a97e9-e0e9-5826-8a8c-57775d35e07c" ) { name @@ -423,7 +423,7 @@ func TestCidAndDocIDQuery_ContainsPCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeidtjhrssohan2f5nt7ml3nh4bovpaqhqjvijlpacfednyx77iw5y4", + cid: "bafyreih2vkwfws7vksed465gq736gi5nfxmajlxxemc64nkcendkddtqym", docID: "bae-a688789e-d8a6-57a7-be09-22e005ab79e0" ) { name @@ -470,7 +470,7 @@ func TestCidAndDocIDQuery_ContainsPCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafybeieimeijjl4hdvqkt5gkn62j54nlnaetm4te7w4z2mdljlyphfsyji", + cid: "bafyreihafx27q435dg6mpbfvdsav47oeq65zkeie3eykjbvuhlwtk5ttwm", docID: "bae-fa6a97e9-e0e9-5826-8a8c-57775d35e07c" ) { name diff --git a/tests/integration/query/simple/with_version_test.go b/tests/integration/query/simple/with_version_test.go index 615e75a293..5baf65a0ae 100644 --- a/tests/integration/query/simple/with_version_test.go +++ b/tests/integration/query/simple/with_version_test.go @@ -46,15 +46,15 @@ func TestQuerySimpleWithEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafybeigxe467aute545c52e27ll3yun7rpkledh5tbjhxxs2i76dzkfdom", + "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", "links": []map[string]any{ { - "cid": "bafybeigdvbqfwrm6dxfnfv4srbue5agzpyzoifl77ix6df7k5pjhat3fwu", - "name": "Age", + "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", + "name": "Name", }, { - "cid": "bafybeicurnibuf3b6krgqm3sh2ohmvxiodvawagx2evod573z67xf54zxu", - "name": "Name", + "cid": 
"bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", + "name": "Age", }, }, }, @@ -171,23 +171,23 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafybeigxe467aute545c52e27ll3yun7rpkledh5tbjhxxs2i76dzkfdom", + "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", "L1": []map[string]any{ { - "cid": "bafybeigdvbqfwrm6dxfnfv4srbue5agzpyzoifl77ix6df7k5pjhat3fwu", - "name": "Age", + "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", + "name": "Name", }, { - "cid": "bafybeicurnibuf3b6krgqm3sh2ohmvxiodvawagx2evod573z67xf54zxu", - "name": "Name", + "cid": "bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", + "name": "Age", }, }, "L2": []map[string]any{ { - "name": "Age", + "name": "Name", }, { - "name": "Name", + "name": "Age", }, }, }, @@ -242,7 +242,7 @@ func TestQuery_WithAllCommitFields_NoError(t *testing.T) { "_docID": docID, "_version": []map[string]any{ { - "cid": "bafybeigxe467aute545c52e27ll3yun7rpkledh5tbjhxxs2i76dzkfdom", + "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", "collectionID": int64(1), "delta": nil, "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", @@ -251,12 +251,12 @@ func TestQuery_WithAllCommitFields_NoError(t *testing.T) { "height": int64(1), "links": []map[string]any{ { - "cid": "bafybeigdvbqfwrm6dxfnfv4srbue5agzpyzoifl77ix6df7k5pjhat3fwu", - "name": "Age", + "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", + "name": "Name", }, { - "cid": "bafybeicurnibuf3b6krgqm3sh2ohmvxiodvawagx2evod573z67xf54zxu", - "name": "Name", + "cid": "bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", + "name": "Age", }, }, "schemaVersionId": "bafkreigqmcqzkbg3elpe24vfza4rjle2r6cxu7ihzvg56aov57crhaebry", @@ -321,7 +321,7 @@ func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { "_docID": docID, "_version": []map[string]any{ { - "cid": 
"bafybeibpezk2dgdlyavsh3k7vbmgh3iwanqhkzo4byafgytjdv5c7xy73u", + "cid": "bafyreibbn2vjovh65xe5v2bqxqxkb6sek5xkbnouhryya6enesbhzfplvm", "collectionID": int64(1), "delta": nil, "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", @@ -330,18 +330,18 @@ func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { "height": int64(2), "links": []map[string]any{ { - "cid": "bafybeihidcg4gkm6bnlyyghr5cq5dkn6x5a4l347amy7odsy5rkd7eu4qu", - "name": "Age", + "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", + "name": "_head", }, { - "cid": "bafybeigxe467aute545c52e27ll3yun7rpkledh5tbjhxxs2i76dzkfdom", - "name": "_head", + "cid": "bafyreifycx5aqjhdlmzaf3bqb6ieomfxrzercas3hxnqcwz2jb25mkrzxi", + "name": "Age", }, }, "schemaVersionId": "bafkreigqmcqzkbg3elpe24vfza4rjle2r6cxu7ihzvg56aov57crhaebry", }, { - "cid": "bafybeigxe467aute545c52e27ll3yun7rpkledh5tbjhxxs2i76dzkfdom", + "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", "collectionID": int64(1), "delta": nil, "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", @@ -350,12 +350,12 @@ func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { "height": int64(1), "links": []map[string]any{ { - "cid": "bafybeigdvbqfwrm6dxfnfv4srbue5agzpyzoifl77ix6df7k5pjhat3fwu", - "name": "Age", + "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", + "name": "Name", }, { - "cid": "bafybeicurnibuf3b6krgqm3sh2ohmvxiodvawagx2evod573z67xf54zxu", - "name": "Name", + "cid": "bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", + "name": "Age", }, }, "schemaVersionId": "bafkreigqmcqzkbg3elpe24vfza4rjle2r6cxu7ihzvg56aov57crhaebry", From 53bdfdfdb434aee3979d826dcc5b0161cf068ac4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 16:23:32 -0400 Subject: [PATCH 10/78] bot: Update dependencies (bulk dependabot PRs) 05-20-2024 (#2631) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit ✅ This PR was created by combining the following PRs: #2630 bot: Bump github.com/multiformats/go-multiaddr from 0.12.3 to 0.12.4 #2629 bot: Bump google.golang.org/grpc from 1.63.2 to 1.64.0 #2628 bot: Bump swagger-ui-react from 5.17.8 to 5.17.10 in /playground #2627 bot: Bump @typescript-eslint/eslint-plugin from 7.8.0 to 7.9.0 in /playground ⚠️ The following PRs were resolved manually due to merge conflicts: #2626 bot: Bump @typescript-eslint/parser from 7.8.0 to 7.9.0 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- go.mod | 8 +- go.sum | 16 +- net/dialer.go | 19 +- playground/package-lock.json | 796 ++++++++++++++++++++++------------- playground/package.json | 6 +- 5 files changed, 539 insertions(+), 306 deletions(-) diff --git a/go.mod b/go.mod index caab40a658..1d8a5a771c 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/libp2p/go-libp2p-kad-dht v0.25.2 github.com/libp2p/go-libp2p-pubsub v0.10.1 github.com/libp2p/go-libp2p-record v0.2.0 - github.com/multiformats/go-multiaddr v0.12.3 + github.com/multiformats/go-multiaddr v0.12.4 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 @@ -53,7 +53,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/term v0.20.0 - google.golang.org/grpc v1.63.2 + google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.33.0 ) @@ -309,8 +309,8 @@ require ( golang.org/x/tools v0.18.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect - 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect diff --git a/go.sum b/go.sum index 190875055a..e91de0abcb 100644 --- a/go.sum +++ b/go.sum @@ -833,8 +833,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.3 h1:hVBXvPRcKG0w80VinQ23P5t7czWgg65BmIvQKjDydU8= -github.com/multiformats/go-multiaddr v0.12.3/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= +github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -1499,10 +1499,10 @@ google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= 
-google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1523,8 +1523,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= 
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/net/dialer.go b/net/dialer.go index 2e8d8b361e..0202da8d9d 100644 --- a/net/dialer.go +++ b/net/dialer.go @@ -15,7 +15,6 @@ package net import ( "context" gonet "net" - "time" gostream "github.com/libp2p/go-libp2p-gostream" libpeer "github.com/libp2p/go-libp2p/core/peer" @@ -27,11 +26,6 @@ import ( pb "github.com/sourcenetwork/defradb/net/pb" ) -var ( - // DialTimeout is the max time duration to wait when dialing a peer. - DialTimeout = time.Second * 10 -) - // dial attempts to open a gRPC connection over libp2p to a peer. func (s *server) dial(peerID libpeer.ID) (pb.ServiceClient, error) { s.mu.Lock() @@ -46,9 +40,16 @@ func (s *server) dial(peerID libpeer.ID) (pb.ServiceClient, error) { return pb.NewServiceClient(conn), nil } } - ctx, cancel := context.WithTimeout(context.Background(), DialTimeout) - defer cancel() - conn, err := grpc.DialContext(ctx, peerID.String(), s.opts...) + // We need the "passthrough:" in the beginning of the target, + // otherwise [grpc.NewClient] will assume (the default) "dns" target. 
+ // More information here: + // - https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md#dialing-in-grpc + // - https://github.com/grpc/grpc/blob/master/doc/naming.md + // - https://github.com/grpc/grpc-go/issues/1786 + conn, err := grpc.NewClient( + "passthrough:"+peerID.String(), + s.opts..., + ) if err != nil { return nil, err } diff --git a/playground/package-lock.json b/playground/package-lock.json index f77bb3002d..ed3a9ac500 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -12,14 +12,14 @@ "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.8" + "swagger-ui-react": "^5.17.10" }, "devDependencies": { "@types/react": "^18.3.2", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.7.1", - "@typescript-eslint/parser": "^7.7.1", + "@typescript-eslint/eslint-plugin": "^7.9.0", + "@typescript-eslint/parser": "^7.9.0", "@vitejs/plugin-react-swc": "^3.6.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", @@ -1654,264 +1654,264 @@ ] }, "node_modules/@swagger-api/apidom-ast": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-0.99.2.tgz", - "integrity": "sha512-poNlXWAU2XBl192+lo5sC6loB3qGvwK30V1pta6Hs200KeTayVsMMRL4R6wDDYEtsbv7M3vQaFKcRGbYUk/SgA==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-1.0.0-alpha.1.tgz", + "integrity": "sha512-yYkW8OmNbZ1S1U7NA+YiALNMef/4BcJlrZEBZ8Iyqh/Rmty66qFf9/ZIS6RJ5a5OPQdB9Xn7V7WxfYdkrhOyQQ==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-error": "^0.99.0", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "unraw": "^3.0.0" } }, "node_modules/@swagger-api/apidom-core": { - "version": "0.99.2", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-0.99.2.tgz", - "integrity": "sha512-deudG9eCxqgPnZyIcZzpmDxF0cja0hdPFS2hB0Op6aB4TKc9mOP1+1iEIDI3Tlx/nzgIayyAl1bblyhK3yH5fQ==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-1.0.0-alpha.1.tgz", + "integrity": "sha512-zPHqGEcdRvD/xfRlJi367GSZ9VXFv7hoh+Ohado5JU/sA8DtVZEiQ+Vfusk3WBIpvvSVezh5Hxyl6P1bTsCLKw==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-ast": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "minim": "~0.23.8", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "short-unique-id": "^5.0.2", - "stampit": "^4.3.2" + "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-error": { - "version": "0.99.0", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-error/-/apidom-error-0.99.0.tgz", - "integrity": "sha512-ZdFdn+GeIo23X2GKFrfH4Y5KY8yTzVF1l/Mqjs8+nD30LTbYg6f3ITHn429dk8fDT3NT69fG+gGm60FAFaKkeQ==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-error/-/apidom-error-1.0.0-alpha.1.tgz", + "integrity": "sha512-AyaQQjpjBHPMQeVT1n5R92NRNEbTbbUGZYf1nEzPk9KEQm2y9K6HBbxg3htSrI3sgUj8LzxQocx8umEkDmj4FA==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7" } }, "node_modules/@swagger-api/apidom-json-pointer": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-0.99.2.tgz", - "integrity": "sha512-bZENmE3H2si1yP38VLUAdhoMWNxkh98+/dCOESaw3R5zXHG04di3ShbYsCG0StkigF+eCfCdaj6XoikQOGSkiA==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-1.0.0-alpha.1.tgz", + "integrity": 
"sha512-Ev8dVTWUCnlS/yOePe4PLz9NdVfyNQB2QGlvtv0zys1AOzoHvxI/xaJCnbroHmHrBkvkyDXwccY2h/LzkMBoVQ==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-ns-api-design-systems": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-0.99.2.tgz", - "integrity": "sha512-854ioZ/FB5DNiJcMinD9/a6dj6h/poOsKcb4POhPTzMSM0fHLIQUp//Ufhx7qL6qsepwtLapkgZ3/hAYN7lnBg==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-1.0.0-alpha.1.tgz", + "integrity": "sha512-bG16p1dY9WlNfSv4K5IUxILnl7GDiwp6Uoik8QGNpTbkSNW1Xky1DWyehmNUOG/P4A62E2aWuWO60WkJYHscSw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-asyncapi-2": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-0.99.2.tgz", - "integrity": "sha512-HF38kCszKYQqhQ6VMEMqd5r7gPGBRpHwPcoYaRJSDeOST/qLLG78xpoCJKQEyL3PQprea0gXKz1LG1uslDHgtQ==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-1.0.0-alpha.1.tgz", + 
"integrity": "sha512-oKp4jY24dKeKY+rVQ76q84zmlcKcBtW+sHT3qx3AC0XZlSQRhrsv2x5/9r/MQoov7LLuGH8T6kI+HPMNPCuzDg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-json-schema-draft-7": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-7": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-4": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-0.99.2.tgz", - "integrity": "sha512-vgCRaqDLI/SmTECZeKO47RGFFx6MCpOcbSm60sV0/ZJxeK+TgkNjIRJTyuRQNts44K863CWgY+bwzzn1zhNqUg==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-1.0.0-alpha.1.tgz", + "integrity": "sha512-gKmmTnmf4DGSfI6543Ajcqzf+epVW8ufxLkIMiSC1gUES2N9ncIyZ7VF5WKx3duWYokQ0abSnsIlCBDRYjFEWQ==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.2", - "@swagger-api/apidom-core": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-ast": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.4" } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-6": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-0.99.2.tgz", - "integrity": "sha512-ayKGsd65a6p/k4s5L2el+vMoMi8kc/bLXVszWszFDET1eZNvhKwEMLylGzKMfnwAFgpj+kJOKn4MZsD6PK6U/A==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-1.0.0-alpha.1.tgz", + 
"integrity": "sha512-4QSFBuSJQokozbyvOPrcwV8fL/abBcY+QYaF7d5Ft87M/+9HtUKyfon6WSLbhAFpaP8ZLhwvJl1kHFXfA/HenA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.4" } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-7": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-0.99.2.tgz", - "integrity": "sha512-Rn2YeQKxj6hSijQAzGRRxMYDRIedqHjE69z9xigVbvm+iDXxLJIwasuzFa7BIMRDZF5eAJkBPHXTiU9cXVsl6w==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-1.0.0-alpha.1.tgz", + "integrity": "sha512-/s9N8a+ronGXsD7uLfvOijnO/qqO5GWM0RYbAol7p8noYWN5ELg8iJScwn7CqjObRZyjMxrRGBSDAK0SquJnMQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-6": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-6": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.4" } }, "node_modules/@swagger-api/apidom-ns-openapi-2": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-0.99.2.tgz", - "integrity": 
"sha512-4YlBvMkxSJIWrOQmsHiVuQ2VkbcWgUnOm7uiRq+8d88ur9mKI5XbP5iUvxCASuONmCqlaSU2+qoM1qesy73XPw==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-1.0.0-alpha.1.tgz", + "integrity": "sha512-dUUFPf2LftBa/FSeRo2Me6HAJVziv0qHq5jX0jqFPTaTiIXaNHkO77W2a3308J5kdsejv7S/N8rbujdXFp+MoQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-openapi-3-0": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-0.99.2.tgz", - "integrity": "sha512-fcT597Ty3kqTkoBr1jeZ3Lfbu0a+CKd1l2ojY6RBF/5+dWNux+CRZ9qosax2XZbN+nJhSdvGLLvGvuKaV3Ybug==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-1.0.0-alpha.1.tgz", + "integrity": "sha512-Be1XDoy6YSyZWCuNDf5y6iYtOt40A/KWI57TEy+0Eao/SLbaupzTJmErtk96bf4/DhoRvlKiAgQkKt+9bqDZ9w==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-ns-json-schema-draft-4": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-openapi-3-1": { - "version": 
"0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-0.99.2.tgz", - "integrity": "sha512-ubO8vi1dYpIV2a3IKhTkBCf125udoCeUZIc9wrhOFwwHHIKeInGR5L6yxlNhOQm0/doYCth77vEqcuTBpxaIrw==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-1.0.0-alpha.1.tgz", + "integrity": "sha512-u87HFtYCtrqBthRp3y2a5YdCmiVTD7v8hv2hn6lGcUIjBB/1anqBejVbcWZa3tmknuUG+yIHROapq8HxKPkdcw==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.2", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-ast": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, "node_modules/@swagger-api/apidom-ns-workflows-1": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-0.99.2.tgz", - "integrity": "sha512-lm8G7cbCRXukN4UOb/bPszUiSbvN1ymvwQ2PEkyZN+DzJvYfgRuAxXt7xd2EDKJcxeH4igpAnkKoIoBoSOHg+w==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-1.0.0-alpha.1.tgz", + "integrity": "sha512-zMSXjWKtmHk+zl/tS3m/PCDJGh6+Gr9revPtxA0OAYvhmKTVhLNX4H8WtP4J+EGAUyjZKB7gussJUodqNR25uQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "ts-mixer": "^6.0.3" } }, 
"node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-json": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-0.99.2.tgz", - "integrity": "sha512-7WPbiUJEWggVmxsssFfW/8JGk8Yu4C9ELneh805kMsgl/DOm6hcHxqT5gXXSwamH0ZQlTmSnHl2OZSlG+U5KKQ==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-1.0.0-alpha.1.tgz", + "integrity": "sha512-JbLcDtB9o/fblyKfYKJ+F2jVdcTPAvdbv1094qk9GCPl1JnU7A9SpkZKDdIF1WyUnJmDATUnSsDEib8gRfeGZw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-api-design-systems": "^0.99.2", - "@swagger-api/apidom-parser-adapter-json": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-api-design-systems": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-yaml": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-0.99.2.tgz", - "integrity": "sha512-ezOA1fjBAQPQ5X0DGYnuFyZMBSBCsaT6k9KDRr7B37Do9yj8YKa/lTlg5usXOrcLm4VgcyJGTKhAJi9kfzCKcA==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-1.0.0-alpha.1.tgz", + "integrity": "sha512-T24Dq4qa/cngkfxUZ6eWULHjEscLutUTO6ltxnKDvyBlxkKURYw8FGBWRn4TSAmk9iK+UVrVg1NoFudtpfN6cQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": 
"^0.99.2", - "@swagger-api/apidom-ns-api-design-systems": "^0.99.2", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-api-design-systems": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-json-2": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-0.99.2.tgz", - "integrity": "sha512-b1ncaIc4dD0FGqty3iRCDUA/uHdd7nH271C06blQ+S9Id4D/xXxzd84z8LeNIJNLhCcnueuMKgUkGzvXP+raAA==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-1.0.0-alpha.1.tgz", + "integrity": "sha512-gkpDw0+pf3B7MXxjk1nIo7WOKm/t9UvG4MGxDr4fB797v9Rt9fPDv2sMgsS/PFPqsa2zRhTrp2CPOKHCiOzW+A==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-asyncapi-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-json": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-asyncapi-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-0.99.2.tgz", - "integrity": "sha512-NuwuwdORyZPhEpxwyEgslyGfVnwIuyDvF5TDT0cLCMOIFDqbE/n77c4FAh/nQUARDEXRthiDb5pdMo/+rOxjFg==", + "version": "1.0.0-alpha.1", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-1.0.0-alpha.1.tgz", + "integrity": "sha512-1N2gF6qympDdIXaoCvT+B9P9yghYuGOyAWF91sN5kQLd6VtBlZi+jTnCPLxAd+rtl3h5WIIQyNed6qC/C884Mg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-asyncapi-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-asyncapi-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-json": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-0.99.2.tgz", - "integrity": "sha512-wy2WF71bLX1wEJkgmPRCEnXicV155KCelPQhCtzAGGo/B3+OuhknovBWXZNStvoJqZ/2A4a5pvYrgHoVoIKchg==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-1.0.0-alpha.1.tgz", + "integrity": "sha512-xNiA3OKGFdf8cHXsVfM2WdOhGDj838XjhXKjKEAbPK+LVe83/QUNRSSL0nxnr0Z6VNJG1J/5y/65Horf3fapow==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.2", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-error": "^0.99.0", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-ast": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "tree-sitter": "=0.20.4", @@ -1920,136 +1920,136 @@ } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-2": { - "version": "0.99.2", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-0.99.2.tgz", - "integrity": "sha512-z+ATszNWaO2JlixM9h4QpTAW2fE5nPCY4IDcScuWbch8gtKBmv61+53nahYb7tc3W/X0mMqhc1LyTCy5QC2L/w==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-1.0.0-alpha.1.tgz", + "integrity": "sha512-mbv+qcCRV6JpVOW2GEt0JjyeRD+lgbXmF5pcalDyr/+1Iuol8v3XLbwLHyEjR7FxMXj/DSjjfytnSKX1C+PwYA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-json": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-0": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-0.99.2.tgz", - "integrity": "sha512-78PFDsF67tWDjPCGAD9cNHage8p5Vs2+zili1AF2zch3JkJA/KxBt+5va4A8w1fYaUaXi8LnMkM8VvEIAsNaOw==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-1.0.0-alpha.1.tgz", + "integrity": "sha512-aJIANKczHzVKUNihytvfVJFpUGiATWsiKtMgLxShx+i3JeN/DfkRGOBM4346mldjcEcUBA2zS8UDGvDGRz6oVQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", - "@swagger-api/apidom-parser-adapter-json": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + 
"@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-1": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-0.99.2.tgz", - "integrity": "sha512-WQmm14C0EH0dcMzvgrGPeLkWKXyFwyunK9rrRt7xRLn8sL1Em0dC31hiVdgypo3DLrz9YW3PStpSQjEedJaWUQ==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-1.0.0-alpha.1.tgz", + "integrity": "sha512-QHcx9KltTmS0qEiLP2in391sQJDm7OYT9IFRH/Iy5mde2F7WNcQqY1D8o/YklDKvnkquHRIytDNx/IEjuYwwHQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-json": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-2": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-0.99.2.tgz", - "integrity": "sha512-rEoE54T8KKRxtdxXgvaYba+GX8853mwcw5nzdrrvOy2tNKqsJANPeJcrQmjVYqJX7SU0HuZPK3zBvyqMyKoNsg==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-1.0.0-alpha.1.tgz", + "integrity": "sha512-gGlz/DV+uENk2KI3YRUbkw93Co/K47vMUIW+jFJ9BuiHJZ34LnylMGtxR/J+4o+4L1WQa3o/czg7NrYG5Xe9pQ==", 
"optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-0.99.2.tgz", - "integrity": "sha512-l7ve45cfAj+imE8flypjdo49zpfp0m29stpOO/q2fCD5/46wT3Z4Ve3aKhil8/TRFEX26VOKoYVNjpeUWzUMaw==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-1.0.0-alpha.1.tgz", + "integrity": "sha512-oOo6CybNEsTwxMsSUE9xjCoyw9H0MMMPGFxAatFbwxDlqyw32CvN3ydXXaQmu4TauhNDmplJLHtRlceKDzl7OQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-0.99.2.tgz", - "integrity": 
"sha512-1ab06o/M6MAJ0Js4C1bifpj/R0T0mw26Qk4dR7qKzel9dDuEkIRMQF7JHnf2pojZE+aR59Eb4iAMKmxzokHZdA==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-1.0.0-alpha.1.tgz", + "integrity": "sha512-ePkENiU3HlYNOULgghjQr47UeNo8hXfI+mH7Iw25XGC8VHwt5X4PpXO63kcNu1pLsdscpLPyzVCTz9J3CxMmxg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-json-1": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-0.99.2.tgz", - "integrity": "sha512-VsFVmwTX/OfsXyBmIEp5Y+adqBF4Cj/cM/55KPM3mIEmKbc+PK3M08TIotMk1FdCiTafe+I28OZL+WMVujNm1A==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-1.0.0-alpha.1.tgz", + "integrity": "sha512-kM2Vmu13eEi+7nZLvLuNF3frZV8nSibD790sqDbtSQOym+cxGB/+iQ9PMeEIddPka7l4to4vDM5HaLW6EKGKAQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-workflows-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-json": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-workflows-1": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@types/ramda": 
"~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-yaml-1": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-0.99.2.tgz", - "integrity": "sha512-yK+48YcllFc8mY711ZJ7uTfPVZmJdujIHbvGLOMxMODmETkZlEjfoTAwNTWvutcuA6cxK70tKUD8vz5572ALQA==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-1.0.0-alpha.1.tgz", + "integrity": "sha512-3arbAoEiEQzz+YaDP6KX768GA0d3CbF8TxtpCnW2U0T2n8qou/kqSYUW2a58EoFDUhEsYyrB6+adwP9H5TSfdA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@swagger-api/apidom-ns-workflows-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-workflows-1": "^1.0.0-alpha.1", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-yaml-1-2": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-0.99.2.tgz", - "integrity": "sha512-eU6Rd58WzzcOYOajwp9UCURhXVO8SUCrau14W6BuF1DbJCr85FmOigy4yu2b9UWsK44ZPzH8KeyhSYwTkqkgLA==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-1.0.0-alpha.1.tgz", + "integrity": "sha512-sDlW8XV4Q/MSJOr9aw9UwCoSAEyy4FFwi3IGqyLlgXWrh9ViaadwhCFLWxtn/stGyubXo29li+5taPuE3ETrqw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^0.99.2", - "@swagger-api/apidom-core": "^0.99.2", - 
"@swagger-api/apidom-error": "^0.99.0", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-ast": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", "tree-sitter": "=0.20.4", @@ -2058,42 +2058,41 @@ } }, "node_modules/@swagger-api/apidom-reference": { - "version": "0.99.2", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-0.99.2.tgz", - "integrity": "sha512-QwAnCCEUbicPAVPWYOOpSI8rcj2e7TTybn1chGfdogV+NMLprGXBk/A86hO9CaSLMXkCA2rERUznSNSZWC996g==", + "version": "1.0.0-alpha.1", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-1.0.0-alpha.1.tgz", + "integrity": "sha512-iK8dyU3YsR23UuAHOlCB9OD9vKKsokyx0QGjYZpUP3EHu2gkTnn7m/NDuMpIC8MRHYlQNt42VKWZjQyC3z1nbw==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^0.99.2", - "@types/ramda": "~0.29.6", + "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@types/ramda": "~0.30.0", "axios": "^1.4.0", "minimatch": "^7.4.3", "process": "^0.11.10", "ramda": "~0.30.0", - "ramda-adjunct": "^5.0.0", - "stampit": "^4.3.2" + "ramda-adjunct": "^5.0.0" }, "optionalDependencies": { - "@swagger-api/apidom-error": "^0.99.0", - "@swagger-api/apidom-json-pointer": "^0.99.2", - "@swagger-api/apidom-ns-asyncapi-2": "^0.99.2", - "@swagger-api/apidom-ns-openapi-2": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-0": "^0.99.2", - "@swagger-api/apidom-ns-openapi-3-1": "^0.99.2", - "@swagger-api/apidom-ns-workflows-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^0.99.2", - "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^0.99.2", - "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-json": "^0.99.2", - 
"@swagger-api/apidom-parser-adapter-openapi-json-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^0.99.2", - "@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-openapi-yaml-2": "^0.99.2", - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^0.99.2", - "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-workflows-json-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-workflows-yaml-1": "^0.99.2", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.99.2" + "@swagger-api/apidom-error": "^1.0.0-alpha.1", + "@swagger-api/apidom-json-pointer": "^1.0.0-alpha.0", + "@swagger-api/apidom-ns-asyncapi-2": "^1.0.0-alpha.0", + "@swagger-api/apidom-ns-openapi-2": "^1.0.0-alpha.0", + "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.0", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.0", + "@swagger-api/apidom-ns-workflows-1": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-openapi-json-2": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-2": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-workflows-json-1": "^1.0.0-alpha.0", + "@swagger-api/apidom-parser-adapter-workflows-yaml-1": "^1.0.0-alpha.0", + 
"@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.0" } }, "node_modules/@swagger-api/apidom-reference/node_modules/minimatch": { @@ -2369,12 +2368,6 @@ "@types/unist": "^2" } }, - "node_modules/@types/json-schema": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "dev": true - }, "node_modules/@types/prop-types": { "version": "15.7.12", "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", @@ -2382,11 +2375,11 @@ "devOptional": true }, "node_modules/@types/ramda": { - "version": "0.29.12", - "resolved": "https://registry.npmjs.org/@types/ramda/-/ramda-0.29.12.tgz", - "integrity": "sha512-sgIEjpJhdQPB52gDF4aphs9nl0xe54CR22DPdWqT8gQHjZYmVApgA0R3/CpMbl0Y8az2TEZrPNL2zy0EvjbkLA==", + "version": "0.30.0", + "resolved": "https://registry.npmjs.org/@types/ramda/-/ramda-0.30.0.tgz", + "integrity": "sha512-DQtfqUbSB18iM9NHbQ++kVUDuBWHMr6T2FpW1XTiksYRGjq4WnNPZLt712OEHEBJs7aMyJ68Mf2kGMOP1srVVw==", "dependencies": { - "types-ramda": "^0.29.10" + "types-ramda": "^0.30.0" } }, "node_modules/@types/react": { @@ -2408,12 +2401,6 @@ "@types/react": "*" } }, - "node_modules/@types/semver": { - "version": "7.5.8", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", - "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", - "dev": true - }, "node_modules/@types/swagger-ui-react": { "version": "4.18.3", "resolved": "https://registry.npmjs.org/@types/swagger-ui-react/-/swagger-ui-react-4.18.3.tgz", @@ -2442,21 +2429,19 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.8.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.8.0.tgz", - "integrity": "sha512-gFTT+ezJmkwutUPmB0skOj3GZJtlEGnlssems4AjkVweUPGj7jRwwqg0Hhg7++kPGJqKtTYx+R05Ftww372aIg==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.9.0.tgz", + "integrity": "sha512-6e+X0X3sFe/G/54aC3jt0txuMTURqLyekmEHViqyA2VnxhLMpvA6nqmcjIy+Cr9tLDHPssA74BP5Mx9HQIxBEA==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.8.0", - "@typescript-eslint/type-utils": "7.8.0", - "@typescript-eslint/utils": "7.8.0", - "@typescript-eslint/visitor-keys": "7.8.0", - "debug": "^4.3.4", + "@typescript-eslint/scope-manager": "7.9.0", + "@typescript-eslint/type-utils": "7.9.0", + "@typescript-eslint/utils": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", - "semver": "^7.6.0", "ts-api-utils": "^1.3.0" }, "engines": { @@ -2476,16 +2461,63 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.9.0.tgz", + "integrity": "sha512-ZwPK4DeCDxr3GJltRz5iZejPFAAr4Wk3+2WIBaj1L5PYK5RgxExu/Y68FFVclN0y6GGwH8q+KgKRCvaTmFBbgQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", + "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", + "dev": 
true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", + "integrity": "sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/parser": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.8.0.tgz", - "integrity": "sha512-KgKQly1pv0l4ltcftP59uQZCi4HUYswCLbTqVZEJu7uLX8CTLyswqMLqLN+2QFz4jCptqWVV4SB7vdxcH2+0kQ==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.9.0.tgz", + "integrity": "sha512-qHMJfkL5qvgQB2aLvhUSXxbK7OLnDkwPzFalg458pxQgfxKDfT1ZDbHQM/I6mDIf/svlMkj21kzKuQ2ixJlatQ==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "7.8.0", - "@typescript-eslint/types": "7.8.0", - "@typescript-eslint/typescript-estree": "7.8.0", - "@typescript-eslint/visitor-keys": "7.8.0", + "@typescript-eslint/scope-manager": "7.9.0", + "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/typescript-estree": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0", "debug": "^4.3.4" }, "engines": { @@ -2504,6 +2536,81 @@ } } }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.9.0.tgz", + "integrity": 
"sha512-ZwPK4DeCDxr3GJltRz5iZejPFAAr4Wk3+2WIBaj1L5PYK5RgxExu/Y68FFVclN0y6GGwH8q+KgKRCvaTmFBbgQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", + "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.9.0.tgz", + "integrity": "sha512-zBCMCkrb2YjpKV3LA0ZJubtKCDxLttxfdGmwZvTqqWevUPN0FZvSI26FalGFFUZU/9YQK/A4xcQF9o/VVaCKAg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", + "integrity": 
"sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/scope-manager": { "version": "7.8.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.8.0.tgz", @@ -2522,13 +2629,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.8.0.tgz", - "integrity": "sha512-H70R3AefQDQpz9mGv13Uhi121FNMh+WEaRqcXTX09YEDky21km4dV1ZXJIp8QjXc4ZaVkXVdohvWDzbnbHDS+A==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.9.0.tgz", + "integrity": "sha512-6Qy8dfut0PFrFRAZsGzuLoM4hre4gjzWJB6sUvdunCYZsYemTkzZNwF1rnGea326PHPT3zn5Lmg32M/xfJfByA==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "7.8.0", - "@typescript-eslint/utils": "7.8.0", + "@typescript-eslint/typescript-estree": "7.9.0", + "@typescript-eslint/utils": "7.9.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2548,6 +2655,64 @@ } } }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", + "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.9.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.9.0.tgz", + "integrity": "sha512-zBCMCkrb2YjpKV3LA0ZJubtKCDxLttxfdGmwZvTqqWevUPN0FZvSI26FalGFFUZU/9YQK/A4xcQF9o/VVaCKAg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", + "integrity": "sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/types": { "version": "7.8.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.8.0.tgz", @@ -2590,18 +2755,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.8.0.tgz", - "integrity": "sha512-L0yFqOCflVqXxiZyXrDr80lnahQfSOfc9ELAAZ75sqicqp2i36kEZZGuUymHNFoYOqxRT05up760b4iGsl02nQ==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.9.0.tgz", + "integrity": "sha512-5KVRQCzZajmT4Ep+NEgjXCvjuypVvYHUW7RHlXzNPuak2oWpVoD1jf5xCP0dPAuNIchjC7uQyvbdaSTFaLqSdA==", "dev": true, 
"dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@types/json-schema": "^7.0.15", - "@types/semver": "^7.5.8", - "@typescript-eslint/scope-manager": "7.8.0", - "@typescript-eslint/types": "7.8.0", - "@typescript-eslint/typescript-estree": "7.8.0", - "semver": "^7.6.0" + "@typescript-eslint/scope-manager": "7.9.0", + "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/typescript-estree": "7.9.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2614,6 +2776,81 @@ "eslint": "^8.56.0" } }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.9.0.tgz", + "integrity": "sha512-ZwPK4DeCDxr3GJltRz5iZejPFAAr4Wk3+2WIBaj1L5PYK5RgxExu/Y68FFVclN0y6GGwH8q+KgKRCvaTmFBbgQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", + "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.9.0.tgz", + "integrity": "sha512-zBCMCkrb2YjpKV3LA0ZJubtKCDxLttxfdGmwZvTqqWevUPN0FZvSI26FalGFFUZU/9YQK/A4xcQF9o/VVaCKAg==", + "dev": true, + "dependencies": { + 
"@typescript-eslint/types": "7.9.0", + "@typescript-eslint/visitor-keys": "7.9.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", + "integrity": "sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.9.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/visitor-keys": { "version": "7.8.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.8.0.tgz", @@ -2754,9 +2991,9 @@ } }, "node_modules/axios": { - "version": "1.6.8", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.8.tgz", - "integrity": "sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==", + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.0.tgz", + "integrity": "sha512-IiB0wQeKyPRdsFVhBgIo31FbzOyf2M6wYl7/NVutFwFBRMiAbjNiydJIHKeLmPugF4kJLfA1uWZ82Is2QzqqFA==", "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.0", @@ -3024,9 +3261,9 @@ } }, "node_modules/core-js-pure": { - "version": "3.37.0", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.37.0.tgz", - "integrity": 
"sha512-d3BrpyFr5eD4KcbRvQ3FTUx/KWmaDesr7+a3+1+P46IUnNoEt+oiLijPINZMEon7w9oGkIINWxrBAU9DEciwFQ==", + "version": "3.37.1", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.37.1.tgz", + "integrity": "sha512-J/r5JTHSmzTxbiYYrzXg9w1VpqrYt+gexenBE9pugeyhwPZTAEJddyiReJWsLO6uNQ8xJZFbod6XC7KKwatCiA==", "hasInstallScript": true, "funding": { "type": "opencollective", @@ -5561,11 +5798,6 @@ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" }, - "node_modules/stampit": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/stampit/-/stampit-4.3.2.tgz", - "integrity": "sha512-pE2org1+ZWQBnIxRPrBM2gVupkuDD0TTNIo1H6GdT/vO82NXli2z8lRE8cu/nBIHrcOCXFBAHpb9ZldrB2/qOA==" - }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -5627,16 +5859,16 @@ } }, "node_modules/swagger-client": { - "version": "3.27.8", - "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.27.8.tgz", - "integrity": "sha512-2wrqNHdMhgQzBM4xjxNDPvinysQ1lc9wHqMJ/HyllyzRyEKL96KyEfP8laI8G1gGbO/vmdtTDEXPFDHp1RrOHQ==", + "version": "3.28.0", + "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.28.0.tgz", + "integrity": "sha512-uEi5wm30241FU4ngFJQzrHuWGPMgbbg6foGiAVuR8S8wS/Iwp2f7vbaTWJOpHwqlxedtg2WX/+PSb/BwVnw/Kw==", "dependencies": { "@babel/runtime-corejs3": "^7.22.15", - "@swagger-api/apidom-core": ">=0.99.1 <1.0.0", - "@swagger-api/apidom-error": ">=0.99.0 <1.0.0", - "@swagger-api/apidom-json-pointer": ">=0.99.1 <1.0.0", - "@swagger-api/apidom-ns-openapi-3-1": ">=0.99.1 <1.0.0", - "@swagger-api/apidom-reference": ">=0.99.1 <1.0.0", + "@swagger-api/apidom-core": ">=1.0.0-alpha.1 <1.0.0-beta.0", + "@swagger-api/apidom-error": ">=1.0.0-alpha.1 <1.0.0-beta.0", + "@swagger-api/apidom-json-pointer": ">=1.0.0-alpha.1 
<1.0.0-beta.0", + "@swagger-api/apidom-ns-openapi-3-1": ">=1.0.0-alpha.1 <1.0.0-beta.0", + "@swagger-api/apidom-reference": ">=1.0.0-alpha.1 <1.0.0-beta.0", "cookie": "~0.6.0", "deepmerge": "~4.3.0", "fast-json-patch": "^3.0.0-1", @@ -5659,9 +5891,9 @@ } }, "node_modules/swagger-ui-react": { - "version": "5.17.8", - "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.8.tgz", - "integrity": "sha512-Afr/Svo1nGlmfClo+PYUUZftIS66uRtPsmLb/5AdSaJUJO9FqXpDb8lTk0MWLd3pTzNRxaxzbSMeDi+P65M0pw==", + "version": "5.17.10", + "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.10.tgz", + "integrity": "sha512-KQH0d3AB256RKCpOA7Zkc2cYIFLIt3nFGJSKIgAkI4PXxlFmru504GNDH0PgWFbitROS9TCki94yZs1a+sJ0Lg==", "dependencies": { "@babel/runtime-corejs3": "^7.24.5", "@braintree/sanitize-url": "=7.0.1", @@ -5691,7 +5923,7 @@ "reselect": "^5.1.0", "serialize-error": "^8.1.0", "sha.js": "^2.4.11", - "swagger-client": "^3.27.8", + "swagger-client": "^3.27.9", "url-parse": "^1.5.10", "xml": "=1.0.1", "xml-but-prettier": "^1.0.1", @@ -5858,9 +6090,9 @@ } }, "node_modules/types-ramda": { - "version": "0.29.10", - "resolved": "https://registry.npmjs.org/types-ramda/-/types-ramda-0.29.10.tgz", - "integrity": "sha512-5PJiW/eiTPyXXBYGZOYGezMl6qj7keBiZheRwfjJZY26QPHsNrjfJnz0mru6oeqqoTHOni893Jfd6zyUXfQRWg==", + "version": "0.30.0", + "resolved": "https://registry.npmjs.org/types-ramda/-/types-ramda-0.30.0.tgz", + "integrity": "sha512-oVPw/KHB5M0Du0txTEKKM8xZOG9cZBRdCVXvwHYuNJUVkAiJ9oWyqkA+9Bj2gjMsHgkkhsYevobQBWs8I2/Xvw==", "dependencies": { "ts-toolbelt": "^9.6.0" } diff --git a/playground/package.json b/playground/package.json index 1b9ddf71c7..0300c781e6 100644 --- a/playground/package.json +++ b/playground/package.json @@ -14,14 +14,14 @@ "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.8" + "swagger-ui-react": "^5.17.10" }, "devDependencies": { "@types/react": "^18.3.2", "@types/react-dom": 
"^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.7.1", - "@typescript-eslint/parser": "^7.7.1", + "@typescript-eslint/eslint-plugin": "^7.9.0", + "@typescript-eslint/parser": "^7.9.0", "@vitejs/plugin-react-swc": "^3.6.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", From 875afdf5c7b11caf146d58396e60fb96232f36f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 17:27:11 -0400 Subject: [PATCH 11/78] bot: Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#2607) Bumps google.golang.org/protobuf from 1.33.0 to 1.34.1. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=google.golang.org/protobuf&package-manager=go_modules&previous-version=1.33.0&new-version=1.34.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1d8a5a771c..7933128def 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/term v0.20.0 google.golang.org/grpc v1.64.0 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 ) require ( diff --git a/go.sum b/go.sum index e91de0abcb..b46681e784 100644 --- a/go.sum +++ b/go.sum @@ -1540,8 +1540,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 148da8f26f6925c6472eb045ed6857031ea055a4 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Tue, 21 May 2024 18:34:38 -0400 Subject: [PATCH 12/78] feat: Inject ACP instance into the DB instance (#2633) ## Relevant issue(s) Resolves #2632 ## Description Injects the ACP instance into the DB instance, like how rootstore is provided. 
Also makes the `acp` package public, and moves the ACP options off of `db` and into `node` (similar to rootstore). --- {internal/acp => acp}/README.md | 0 {internal/acp => acp}/acp.go | 0 {internal/acp => acp}/acp_local.go | 0 {internal/acp => acp}/acp_local_test.go | 0 {internal/acp => acp}/doc.go | 0 {internal/acp => acp}/dpi.go | 0 {internal/acp => acp}/errors.go | 0 {internal/acp => acp}/identity/identity.go | 0 cli/server_dump.go | 3 +- cli/start.go | 11 +-- cli/utils.go | 2 +- http/handler_ccip_test.go | 3 +- http/middleware.go | 2 +- internal/db/backup_test.go | 2 +- internal/db/collection.go | 2 +- internal/db/collection_acp.go | 2 +- internal/db/collection_delete.go | 2 +- internal/db/config.go | 15 ---- internal/db/config_test.go | 12 ---- internal/db/context.go | 2 +- internal/db/db.go | 8 ++- internal/db/db_test.go | 5 +- internal/db/fetcher/fetcher.go | 4 +- internal/db/fetcher/indexer.go | 4 +- internal/db/fetcher/mocks/fetcher.go | 4 +- internal/db/fetcher/versioned.go | 4 +- internal/db/indexed_docs_test.go | 4 +- internal/db/permission/check.go | 4 +- internal/db/permission/register.go | 4 +- internal/lens/fetcher.go | 4 +- internal/planner/planner.go | 4 +- net/node_test.go | 21 +++--- net/peer_test.go | 24 ++++--- node/acp.go | 83 ++++++++++++++++++++++ node/node.go | 16 ++++- tests/bench/query/planner/utils.go | 4 +- tests/gen/cli/util_test.go | 3 +- tests/integration/acp.go | 2 +- tests/integration/db.go | 26 +++++-- tests/integration/utils2.go | 2 +- 40 files changed, 192 insertions(+), 96 deletions(-) rename {internal/acp => acp}/README.md (100%) rename {internal/acp => acp}/acp.go (100%) rename {internal/acp => acp}/acp_local.go (100%) rename {internal/acp => acp}/acp_local_test.go (100%) rename {internal/acp => acp}/doc.go (100%) rename {internal/acp => acp}/dpi.go (100%) rename {internal/acp => acp}/errors.go (100%) rename {internal/acp => acp}/identity/identity.go (100%) create mode 100644 node/acp.go diff --git a/internal/acp/README.md 
b/acp/README.md similarity index 100% rename from internal/acp/README.md rename to acp/README.md diff --git a/internal/acp/acp.go b/acp/acp.go similarity index 100% rename from internal/acp/acp.go rename to acp/acp.go diff --git a/internal/acp/acp_local.go b/acp/acp_local.go similarity index 100% rename from internal/acp/acp_local.go rename to acp/acp_local.go diff --git a/internal/acp/acp_local_test.go b/acp/acp_local_test.go similarity index 100% rename from internal/acp/acp_local_test.go rename to acp/acp_local_test.go diff --git a/internal/acp/doc.go b/acp/doc.go similarity index 100% rename from internal/acp/doc.go rename to acp/doc.go diff --git a/internal/acp/dpi.go b/acp/dpi.go similarity index 100% rename from internal/acp/dpi.go rename to acp/dpi.go diff --git a/internal/acp/errors.go b/acp/errors.go similarity index 100% rename from internal/acp/errors.go rename to acp/errors.go diff --git a/internal/acp/identity/identity.go b/acp/identity/identity.go similarity index 100% rename from internal/acp/identity/identity.go rename to acp/identity/identity.go diff --git a/cli/server_dump.go b/cli/server_dump.go index 70ba5e557b..b311f04ec1 100644 --- a/cli/server_dump.go +++ b/cli/server_dump.go @@ -13,6 +13,7 @@ package cli import ( "github.com/spf13/cobra" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/node" @@ -36,7 +37,7 @@ func MakeServerDumpCmd() *cobra.Command { if err != nil { return err } - db, err := db.NewDB(cmd.Context(), rootstore) + db, err := db.NewDB(cmd.Context(), rootstore, acp.NoACP) if err != nil { return errors.Wrap("failed to initialize database", err) } diff --git a/cli/start.go b/cli/start.go index c371475064..118cf726fb 100644 --- a/cli/start.go +++ b/cli/start.go @@ -102,10 +102,6 @@ func MakeStartCommand() *cobra.Command { dbOpts := []db.Option{ db.WithUpdateEvents(), 
db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")), - // TODO-ACP: Infuture when we add support for the --no-acp flag when admin signatures are in, - // we can allow starting of db without acp. Currently that can only be done programmatically. - // https://github.com/sourcenetwork/defradb/issues/2271 - db.WithACPInMemory(), } netOpts := []net.NodeOpt{ @@ -126,6 +122,10 @@ func MakeStartCommand() *cobra.Command { node.WithInMemory(cfg.GetString("datastore.store") == configStoreMemory), } + acpOpts := []node.ACPOpt{ + node.WithACPType(node.LocalACPType), + } + var peers []peer.AddrInfo if val := cfg.GetStringSlice("net.peers"); len(val) > 0 { addrs, err := netutils.ParsePeers(val) @@ -140,7 +140,7 @@ func MakeStartCommand() *cobra.Command { // TODO-ACP: Infuture when we add support for the --no-acp flag when admin signatures are in, // we can allow starting of db without acp. Currently that can only be done programmatically. // https://github.com/sourcenetwork/defradb/issues/2271 - dbOpts = append(dbOpts, db.WithACP(rootDir)) + acpOpts = append(acpOpts, node.WithACPPath(rootDir)) } if !cfg.GetBool("keyring.disabled") { @@ -169,6 +169,7 @@ func MakeStartCommand() *cobra.Command { node.WithNetOpts(netOpts...), node.WithServerOpts(serverOpts...), node.WithDisableP2P(cfg.GetBool("net.p2pDisabled")), + node.WithACPOpts(acpOpts...), } n, err := node.NewNode(cmd.Context(), opts...) 
diff --git a/cli/utils.go b/cli/utils.go index e85dd09e2c..8c394d79eb 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -21,9 +21,9 @@ import ( "github.com/spf13/viper" "golang.org/x/term" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/http" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/keyring" ) diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go index 40e088b685..ab8381565a 100644 --- a/http/handler_ccip_test.go +++ b/http/handler_ccip_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/internal/db" @@ -192,7 +193,7 @@ func TestCCIPPost_WithInvalidBody(t *testing.T) { func setupDatabase(t *testing.T) client.DB { ctx := context.Background() - cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), db.WithUpdateEvents()) + cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) _, err = cdb.AddSchema(ctx, `type User { diff --git a/http/middleware.go b/http/middleware.go index 39783a3396..d37a7d3962 100644 --- a/http/middleware.go +++ b/http/middleware.go @@ -21,9 +21,9 @@ import ( "github.com/go-chi/cors" "golang.org/x/exp/slices" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/db" ) diff --git a/internal/db/backup_test.go b/internal/db/backup_test.go index ce22333526..486080db81 100644 --- a/internal/db/backup_test.go +++ b/internal/db/backup_test.go @@ -18,8 +18,8 @@ import 
( "github.com/stretchr/testify/require" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" ) func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) { diff --git a/internal/db/collection.go b/internal/db/collection.go index 20cead8193..4b9c988288 100644 --- a/internal/db/collection.go +++ b/internal/db/collection.go @@ -27,11 +27,11 @@ import ( "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/internal/acp" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" diff --git a/internal/db/collection_acp.go b/internal/db/collection_acp.go index b61aeb8441..9ca432f9aa 100644 --- a/internal/db/collection_acp.go +++ b/internal/db/collection_acp.go @@ -13,7 +13,7 @@ package db import ( "context" - "github.com/sourcenetwork/defradb/internal/acp" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/internal/db/permission" ) diff --git a/internal/db/collection_delete.go b/internal/db/collection_delete.go index 420b49c218..9612c4f42c 100644 --- a/internal/db/collection_delete.go +++ b/internal/db/collection_delete.go @@ -15,9 +15,9 @@ import ( cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/internal/acp" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/merkle/clock" 
diff --git a/internal/db/config.go b/internal/db/config.go index 56141068c4..a655647df7 100644 --- a/internal/db/config.go +++ b/internal/db/config.go @@ -11,13 +11,10 @@ package db import ( - "context" - "github.com/lens-vm/lens/host-go/engine/module" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/internal/acp" ) const ( @@ -28,18 +25,6 @@ const ( // Option is a funtion that sets a config value on the db. type Option func(*db) -// WithACP enables access control. If path is empty then acp runs in-memory. -func WithACP(path string) Option { - return func(db *db) { - var acpLocal acp.ACPLocal - acpLocal.Init(context.Background(), path) - db.acp = immutable.Some[acp.ACP](&acpLocal) - } -} - -// WithACPInMemory enables access control in-memory. -func WithACPInMemory() Option { return WithACP("") } - // WithUpdateEvents enables the update events channel. func WithUpdateEvents() Option { return func(db *db) { diff --git a/internal/db/config_test.go b/internal/db/config_test.go index 02bd81a910..d4dbadaec6 100644 --- a/internal/db/config_test.go +++ b/internal/db/config_test.go @@ -17,18 +17,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestWithACP(t *testing.T) { - d := &db{} - WithACP("test")(d) - assert.True(t, d.acp.HasValue()) -} - -func TestWithACPInMemory(t *testing.T) { - d := &db{} - WithACPInMemory()(d) - assert.True(t, d.acp.HasValue()) -} - func TestWithUpdateEvents(t *testing.T) { d := &db{} WithUpdateEvents()(d) diff --git a/internal/db/context.go b/internal/db/context.go index 96b28e0799..88019af323 100644 --- a/internal/db/context.go +++ b/internal/db/context.go @@ -15,8 +15,8 @@ import ( "github.com/sourcenetwork/immutable" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/datastore" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" ) // txnContextKey is the key type for transaction context values. 
diff --git a/internal/db/db.go b/internal/db/db.go index ec7aef485e..af40627e2b 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -26,11 +26,11 @@ import ( "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/internal/acp" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/lens" "github.com/sourcenetwork/defradb/internal/request/graphql" @@ -80,14 +80,16 @@ type db struct { func NewDB( ctx context.Context, rootstore datastore.RootStore, + acp immutable.Option[acp.ACP], options ...Option, ) (client.DB, error) { - return newDB(ctx, rootstore, options...) + return newDB(ctx, rootstore, acp, options...) } func newDB( ctx context.Context, rootstore datastore.RootStore, + acp immutable.Option[acp.ACP], options ...Option, ) (*db, error) { multistore := datastore.MultiStoreFrom(rootstore) @@ -100,7 +102,7 @@ func newDB( db := &db{ rootstore: rootstore, multistore: multistore, - acp: acp.NoACP, + acp: acp, parser: parser, options: options, } diff --git a/internal/db/db_test.go b/internal/db/db_test.go index 118adb285b..fe60449cc2 100644 --- a/internal/db/db_test.go +++ b/internal/db/db_test.go @@ -16,6 +16,7 @@ import ( badger "github.com/sourcenetwork/badger/v4" + "github.com/sourcenetwork/defradb/acp" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" ) @@ -25,7 +26,7 @@ func newMemoryDB(ctx context.Context) (*db, error) { if err != nil { return nil, err } - return newDB(ctx, rootstore) + return newDB(ctx, rootstore, acp.NoACP) } func TestNewDB(t *testing.T) { @@ -37,7 +38,7 @@ func TestNewDB(t *testing.T) { return } - _, err = NewDB(ctx, rootstore) + _, err = NewDB(ctx, rootstore, acp.NoACP) if err != nil { t.Error(err) } diff --git 
a/internal/db/fetcher/fetcher.go b/internal/db/fetcher/fetcher.go index ffb42f9c96..bfaed9d871 100644 --- a/internal/db/fetcher/fetcher.go +++ b/internal/db/fetcher/fetcher.go @@ -20,11 +20,11 @@ import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/iterable" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/db/permission" diff --git a/internal/db/fetcher/indexer.go b/internal/db/fetcher/indexer.go index 31104cc13e..eff5146d6f 100644 --- a/internal/db/fetcher/indexer.go +++ b/internal/db/fetcher/indexer.go @@ -15,10 +15,10 @@ import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/planner/mapper" diff --git a/internal/db/fetcher/mocks/fetcher.go b/internal/db/fetcher/mocks/fetcher.go index fc68f152d0..99fee65c47 100644 --- a/internal/db/fetcher/mocks/fetcher.go +++ b/internal/db/fetcher/mocks/fetcher.go @@ -4,7 +4,7 @@ package mocks import ( client "github.com/sourcenetwork/defradb/client" - acp "github.com/sourcenetwork/defradb/internal/acp" + acp "github.com/sourcenetwork/defradb/acp" context "context" @@ -14,7 +14,7 @@ import ( fetcher "github.com/sourcenetwork/defradb/internal/db/fetcher" - 
identity "github.com/sourcenetwork/defradb/internal/acp/identity" + identity "github.com/sourcenetwork/defradb/acp/identity" immutable "github.com/sourcenetwork/immutable" diff --git a/internal/db/fetcher/versioned.go b/internal/db/fetcher/versioned.go index fd907658aa..6ce8f94ebc 100644 --- a/internal/db/fetcher/versioned.go +++ b/internal/db/fetcher/versioned.go @@ -20,11 +20,11 @@ import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" diff --git a/internal/db/indexed_docs_test.go b/internal/db/indexed_docs_test.go index 5c4baacdf8..002e05eabe 100644 --- a/internal/db/indexed_docs_test.go +++ b/internal/db/indexed_docs_test.go @@ -24,11 +24,11 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/mocks" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/fetcher" fetcherMocks "github.com/sourcenetwork/defradb/internal/db/fetcher/mocks" diff --git a/internal/db/permission/check.go b/internal/db/permission/check.go index 8a2057d030..36dce10489 100644 --- a/internal/db/permission/check.go +++ b/internal/db/permission/check.go @@ -15,9 
+15,9 @@ import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" ) // CheckAccessOfDocOnCollectionWithACP handles the check, which tells us if access to the target diff --git a/internal/db/permission/register.go b/internal/db/permission/register.go index 06a48f398a..a46e5eef34 100644 --- a/internal/db/permission/register.go +++ b/internal/db/permission/register.go @@ -15,9 +15,9 @@ import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" ) // RegisterDocOnCollectionWithACP handles the registration of the document with acp. 
diff --git a/internal/lens/fetcher.go b/internal/lens/fetcher.go index 7d5801ad25..357bbe9677 100644 --- a/internal/lens/fetcher.go +++ b/internal/lens/fetcher.go @@ -18,11 +18,11 @@ import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/fetcher" "github.com/sourcenetwork/defradb/internal/planner/mapper" diff --git a/internal/planner/planner.go b/internal/planner/planner.go index 384ebf8ecb..13e1e0b2e9 100644 --- a/internal/planner/planner.go +++ b/internal/planner/planner.go @@ -15,11 +15,11 @@ import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/connor" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/planner/filter" diff --git a/net/node_test.go b/net/node_test.go index c0012bdb31..5e0b30570f 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -20,6 +20,7 @@ import ( badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" @@ -35,7 +36,7 @@ func 
FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} rootstore, err := badgerds.NewDatastore("", &opts) require.NoError(t, err) - database, err = db.NewDB(ctx, rootstore, db.WithUpdateEvents()) + database, err = db.NewDB(ctx, rootstore, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) return database } @@ -43,7 +44,7 @@ func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { func TestNewNode_WithEnableRelay_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -58,7 +59,7 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) db.Close() @@ -72,7 +73,7 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { func TestNewNode_NoPubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -87,7 +88,7 @@ func TestNewNode_NoPubSub_NoError(t *testing.T) { func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( @@ -105,7 +106,7 @@ func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { func TestNodeClose_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) 
- db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -118,7 +119,7 @@ func TestNodeClose_NoError(t *testing.T) { func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -134,7 +135,7 @@ func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -161,7 +162,7 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -191,7 +192,7 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing func TestListenAddrs_WithListenAddresses_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), diff --git a/net/peer_test.go b/net/peer_test.go index 248f665073..41977b4664 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -23,12 +23,14 @@ import ( "github.com/libp2p/go-libp2p/core/peer" mh "github.com/multiformats/go-multihash" rpc 
"github.com/sourcenetwork/go-libp2p-pubsub-rpc" + "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/require" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/events" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/core/crdt" "github.com/sourcenetwork/defradb/internal/db" @@ -69,7 +71,9 @@ const randomMultiaddr = "/ip4/127.0.0.1/tcp/0" func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents(), db.WithACPInMemory()) + var acpLocal acp.ACPLocal + acpLocal.Init(context.Background(), "") + db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](&acpLocal), db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( @@ -85,7 +89,7 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { func TestNewPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) h, err := libp2p.New() @@ -108,7 +112,7 @@ func TestNewPeer_NoDB_NilDBError(t *testing.T) { func TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) _, err = db.AddSchema(ctx, `type User { @@ -158,11 +162,11 @@ func TestStartAndClose_NoError(t *testing.T) { func TestStart_WithKnownPeer_NoError(t *testing.T) { ctx := context.Background() store := 
memory.NewDatastore(ctx) - db1, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db1, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) store2 := memory.NewDatastore(ctx) - db2, err := db.NewDB(ctx, store2, db.WithUpdateEvents()) + db2, err := db.NewDB(ctx, store2, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -194,11 +198,11 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db1, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db1, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) store2 := memory.NewDatastore(ctx) - db2, err := db.NewDB(ctx, store2, db.WithUpdateEvents()) + db2, err := db.NewDB(ctx, store2, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -234,7 +238,7 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store) + db, err := db.NewDB(ctx, store, acp.NoACP) require.NoError(t, err) n, err := NewNode( @@ -253,7 +257,7 @@ func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( diff --git a/node/acp.go b/node/acp.go new file mode 100644 index 0000000000..77b14cc0e9 --- /dev/null +++ b/node/acp.go @@ -0,0 +1,83 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package node + +import ( + "context" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/acp" +) + +type ACPType uint8 + +const ( + NoACPType ACPType = 0 + LocalACPType ACPType = 1 +) + +// ACPOptions contains ACP configuration values. +type ACPOptions struct { + acpType ACPType + + // Note: An empty path will result in an in-memory ACP instance. + path string +} + +// DefaultACPOptions returns new options with default values. +func DefaultACPOptions() *ACPOptions { + return &ACPOptions{ + acpType: LocalACPType, + } +} + +// StoreOpt is a function for setting configuration values. +type ACPOpt func(*ACPOptions) + +// WithACPType sets the ACP type. +func WithACPType(acpType ACPType) ACPOpt { + return func(o *ACPOptions) { + o.acpType = acpType + } +} + +// WithACPPath sets the ACP path. +// +// Note: An empty path will result in an in-memory ACP instance. +func WithACPPath(path string) ACPOpt { + return func(o *ACPOptions) { + o.path = path + } +} + +// NewACP returns a new ACP module with the given options. 
+func NewACP(ctx context.Context, opts ...ACPOpt) (immutable.Option[acp.ACP], error) { + options := DefaultACPOptions() + for _, opt := range opts { + opt(options) + } + + switch options.acpType { + case NoACPType: + return acp.NoACP, nil + + case LocalACPType: + var acpLocal acp.ACPLocal + acpLocal.Init(ctx, options.path) + return immutable.Some[acp.ACP](&acpLocal), nil + + default: + var acpLocal acp.ACPLocal + acpLocal.Init(ctx, options.path) + return immutable.Some[acp.ACP](&acpLocal), nil + } +} diff --git a/node/node.go b/node/node.go index d0cfc87d0c..40bf572f65 100644 --- a/node/node.go +++ b/node/node.go @@ -33,6 +33,7 @@ type Options struct { dbOpts []db.Option netOpts []net.NodeOpt serverOpts []http.ServerOpt + acpOpts []ACPOpt peers []peer.AddrInfo disableP2P bool disableAPI bool @@ -53,6 +54,13 @@ func WithStoreOpts(opts ...StoreOpt) NodeOpt { } } +// WithACPOpts sets the ACP options. +func WithACPOpts(opts ...ACPOpt) NodeOpt { + return func(o *Options) { + o.acpOpts = opts + } +} + // WithDatabaseOpts sets the database options. func WithDatabaseOpts(opts ...db.Option) NodeOpt { return func(o *Options) { @@ -112,7 +120,13 @@ func NewNode(ctx context.Context, opts ...NodeOpt) (*Node, error) { if err != nil { return nil, err } - db, err := db.NewDB(ctx, rootstore, options.dbOpts...) + + acp, err := NewACP(ctx, options.acpOpts...) + if err != nil { + return nil, err + } + + db, err := db.NewDB(ctx, rootstore, acp, options.dbOpts...) 
if err != nil { return nil, err } diff --git a/tests/bench/query/planner/utils.go b/tests/bench/query/planner/utils.go index 7655e55003..967f141357 100644 --- a/tests/bench/query/planner/utils.go +++ b/tests/bench/query/planner/utils.go @@ -15,10 +15,10 @@ import ( "fmt" "testing" + "github.com/sourcenetwork/defradb/acp" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/acp" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/planner" "github.com/sourcenetwork/defradb/internal/request/graphql" diff --git a/tests/gen/cli/util_test.go b/tests/gen/cli/util_test.go index 2114687ad5..dbfef99524 100644 --- a/tests/gen/cli/util_test.go +++ b/tests/gen/cli/util_test.go @@ -19,6 +19,7 @@ import ( "github.com/sourcenetwork/corelog" "github.com/stretchr/testify/require" + "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" @@ -49,7 +50,7 @@ func start(ctx context.Context) (*defraInstance, error) { return nil, errors.Wrap("failed to open datastore", err) } - db, err := db.NewDB(ctx, rootstore) + db, err := db.NewDB(ctx, rootstore, acp.NoACP) if err != nil { return nil, errors.Wrap("failed to create a database", err) } diff --git a/tests/integration/acp.go b/tests/integration/acp.go index ea69dd278c..b02d83eed7 100644 --- a/tests/integration/acp.go +++ b/tests/integration/acp.go @@ -14,7 +14,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/require" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/internal/db" ) diff --git a/tests/integration/db.go 
b/tests/integration/db.go index d565030b3e..cca6f2f263 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -24,6 +24,7 @@ import ( badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/internal/db" + "github.com/sourcenetwork/defradb/node" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" ) @@ -85,8 +86,13 @@ func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, err if err != nil { return nil, err } - dbopts = append(dbopts, db.WithACPInMemory()) - db, err := db.NewDB(ctx, rootstore, dbopts...) + + acp, err := node.NewACP(ctx) + if err != nil { + return nil, err + } + + db, err := db.NewDB(ctx, rootstore, acp, dbopts...) if err != nil { return nil, err } @@ -94,8 +100,12 @@ func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, err } func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { - dbopts = append(dbopts, db.WithACPInMemory()) - db, err := db.NewDB(ctx, memory.NewDatastore(ctx), dbopts...) + acp, err := node.NewACP(ctx) + if err != nil { + return nil, err + } + + db, err := db.NewDB(ctx, memory.NewDatastore(ctx), acp, dbopts...) if err != nil { return nil, err } @@ -130,8 +140,12 @@ func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (cl return nil, "", err } - dbopts = append(dbopts, db.WithACP(dbPath)) - db, err := db.NewDB(ctx, rootstore, dbopts...) + acp, err := node.NewACP(ctx, node.WithACPPath(dbPath)) + if err != nil { + return nil, "", err + } + + db, err := db.NewDB(ctx, rootstore, acp, dbopts...) 
if err != nil { return nil, "", err } diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 3e6923b52c..f5e6156707 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -27,12 +27,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/crypto" "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" - acpIdentity "github.com/sourcenetwork/defradb/internal/acp/identity" "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/internal/request/graphql" "github.com/sourcenetwork/defradb/net" From d10d5a60aa019b3c76fe681615cc0464c13d95fb Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Wed, 22 May 2024 11:24:26 -0400 Subject: [PATCH 13/78] test: Test node pkg constructor via integration test suite (#2641) ## Relevant issue(s) Resolves #2634 ## Description Tests node pkg constructor via integration test suite instead of bypassing it and directly creating `db` instances via the `db` package. --- cli/server_dump.go | 2 +- node/node.go | 3 +- node/store.go | 20 ++++- tests/bench/bench_util.go | 2 +- tests/integration/db.go | 123 +++++++++++---------------- tests/integration/events/utils.go | 3 +- tests/integration/net/order/utils.go | 3 +- 7 files changed, 74 insertions(+), 82 deletions(-) diff --git a/cli/server_dump.go b/cli/server_dump.go index b311f04ec1..1d3c68e54a 100644 --- a/cli/server_dump.go +++ b/cli/server_dump.go @@ -33,7 +33,7 @@ func MakeServerDumpCmd() *cobra.Command { storeOpts := []node.StoreOpt{ node.WithPath(cfg.GetString("datastore.badger.path")), } - rootstore, err := node.NewStore(storeOpts...) + rootstore, err := node.NewStore(cmd.Context(), storeOpts...) 
if err != nil { return err } diff --git a/node/node.go b/node/node.go index 40bf572f65..1293468855 100644 --- a/node/node.go +++ b/node/node.go @@ -116,7 +116,8 @@ func NewNode(ctx context.Context, opts ...NodeOpt) (*Node, error) { for _, opt := range opts { opt(options) } - rootstore, err := NewStore(options.storeOpts...) + + rootstore, err := NewStore(ctx, options.storeOpts...) if err != nil { return nil, err } diff --git a/node/store.go b/node/store.go index 8e15b7aab1..8354c0f7df 100644 --- a/node/store.go +++ b/node/store.go @@ -11,14 +11,18 @@ package node import ( + "context" + "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/badger/v4" + "github.com/sourcenetwork/defradb/datastore/memory" ) // StoreOptions contains store configuration values. type StoreOptions struct { path string inMemory bool + defraStore bool valueLogFileSize int64 encryptionKey []byte } @@ -41,6 +45,16 @@ func WithInMemory(inMemory bool) StoreOpt { } } +// WithDefraStore sets the defra store flag. +// +// Setting this to true will result in the defra node being created with +// the a custom defra implementation of the rootstore instead of badger. +func WithDefraStore(defraStore bool) StoreOpt { + return func(o *StoreOptions) { + o.defraStore = defraStore + } +} + // WithPath sets the datastore path. func WithPath(path string) StoreOpt { return func(o *StoreOptions) { @@ -63,12 +77,16 @@ func WithEncryptionKey(encryptionKey []byte) StoreOpt { } // NewStore returns a new store with the given options. 
-func NewStore(opts ...StoreOpt) (datastore.RootStore, error) { +func NewStore(ctx context.Context, opts ...StoreOpt) (datastore.RootStore, error) { options := DefaultStoreOptions() for _, opt := range opts { opt(options) } + if options.defraStore { + return memory.NewDatastore(ctx), nil + } + badgerOpts := badger.DefaultOptions badgerOpts.InMemory = options.inMemory badgerOpts.ValueLogFileSize = options.valueLogFileSize diff --git a/tests/bench/bench_util.go b/tests/bench/bench_util.go index 186dbc0f3e..5993ee50f8 100644 --- a/tests/bench/bench_util.go +++ b/tests/bench/bench_util.go @@ -227,7 +227,7 @@ func newBenchStoreInfo(ctx context.Context, t testing.TB) (client.DB, error) { case "memory": db, err = testutils.NewBadgerMemoryDB(ctx) case "badger": - db, _, err = testutils.NewBadgerFileDB(ctx, t) + db, err = testutils.NewBadgerFileDB(ctx, t) default: return nil, errors.New(fmt.Sprintf("invalid storage engine backend: %s", storage)) } diff --git a/tests/integration/db.go b/tests/integration/db.go index cca6f2f263..329fdeffdd 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -17,12 +17,8 @@ import ( "strconv" "testing" - badger "github.com/sourcenetwork/badger/v4" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/crypto" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/node" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" @@ -74,93 +70,46 @@ func init() { } } -func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { - opts := badgerds.Options{ - Options: badger.DefaultOptions("").WithInMemory(true), - } - if encryptionKey != nil { - opts.Options.EncryptionKey = encryptionKey - opts.Options.IndexCacheSize = 100 << 20 - } - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - return nil, err - } - 
- acp, err := node.NewACP(ctx) - if err != nil { - return nil, err - } - - db, err := db.NewDB(ctx, rootstore, acp, dbopts...) - if err != nil { - return nil, err +func NewBadgerMemoryDB(ctx context.Context) (client.DB, error) { + opts := []node.NodeOpt{ + node.WithStoreOpts(node.WithInMemory(true)), + node.WithDatabaseOpts(db.WithUpdateEvents()), } - return db, nil -} -func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { - acp, err := node.NewACP(ctx) + node, err := node.NewNode(ctx, opts...) if err != nil { return nil, err } - db, err := db.NewDB(ctx, memory.NewDatastore(ctx), acp, dbopts...) - if err != nil { - return nil, err - } - return db, nil + return node.DB, err } -func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (client.DB, string, error) { - var dbPath string - switch { - case databaseDir != "": - // restarting database - dbPath = databaseDir - - case changeDetector.Enabled: - // change detector - dbPath = changeDetector.DatabaseDir(t) - - default: - // default test case - dbPath = t.TempDir() - } - - opts := &badgerds.Options{ - Options: badger.DefaultOptions(dbPath), - } - if encryptionKey != nil { - opts.Options.EncryptionKey = encryptionKey - opts.Options.IndexCacheSize = 100 << 20 - } - rootstore, err := badgerds.NewDatastore(dbPath, opts) - if err != nil { - return nil, "", err - } +func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, error) { + path := t.TempDir() - acp, err := node.NewACP(ctx, node.WithACPPath(dbPath)) - if err != nil { - return nil, "", err + opts := []node.NodeOpt{ + node.WithStoreOpts(node.WithPath(path)), } - db, err := db.NewDB(ctx, rootstore, acp, dbopts...) + node, err := node.NewNode(ctx, opts...) if err != nil { - return nil, "", err + return nil, err } - return db, dbPath, err + return node.DB, err } // setupDatabase returns the database implementation for the current // testing state. 
The database type on the test state is used to // select the datastore implementation to use. -func setupDatabase(s *state) (impl client.DB, path string, err error) { - dbopts := []db.Option{ +func setupDatabase(s *state) (client.DB, string, error) { + dbOpts := []db.Option{ db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize), } + storeOpts := []node.StoreOpt{} + acpOpts := []node.ACPOpt{} + opts := []node.NodeOpt{} if badgerEncryption && encryptionKey == nil { key, err := crypto.GenerateAES256() @@ -170,22 +119,48 @@ func setupDatabase(s *state) (impl client.DB, path string, err error) { encryptionKey = key } + if encryptionKey != nil { + storeOpts = append(storeOpts, node.WithEncryptionKey(encryptionKey)) + } + + var path string switch s.dbt { case badgerIMType: - impl, err = NewBadgerMemoryDB(s.ctx, dbopts...) + storeOpts = append(storeOpts, node.WithInMemory(true)) case badgerFileType: - impl, path, err = NewBadgerFileDB(s.ctx, s.t, dbopts...) + switch { + case databaseDir != "": + // restarting database + path = databaseDir + + case changeDetector.Enabled: + // change detector + path = changeDetector.DatabaseDir(s.t) + + default: + // default test case + path = s.t.TempDir() + } + + storeOpts = append(storeOpts, node.WithPath(path)) + acpOpts = append(acpOpts, node.WithACPPath(path)) case defraIMType: - impl, err = NewInMemoryDB(s.ctx, dbopts...) + storeOpts = append(storeOpts, node.WithDefraStore(true)) default: - err = fmt.Errorf("invalid database type: %v", s.dbt) + return nil, "", fmt.Errorf("invalid database type: %v", s.dbt) } + opts = append(opts, node.WithDatabaseOpts(dbOpts...)) + opts = append(opts, node.WithStoreOpts(storeOpts...)) + opts = append(opts, node.WithACPOpts(acpOpts...)) + + node, err := node.NewNode(s.ctx, opts...) 
if err != nil { return nil, "", err } - return + + return node.DB, path, nil } diff --git a/tests/integration/events/utils.go b/tests/integration/events/utils.go index d19a96a052..eb514bce2b 100644 --- a/tests/integration/events/utils.go +++ b/tests/integration/events/utils.go @@ -20,7 +20,6 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/db" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -70,7 +69,7 @@ func ExecuteRequestTestCase( ) { ctx := context.Background() - db, err := testUtils.NewBadgerMemoryDB(ctx, db.WithUpdateEvents()) + db, err := testUtils.NewBadgerMemoryDB(ctx) require.NoError(t, err) _, err = db.AddSchema(ctx, schema) diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index adac8c7535..c7075dae22 100644 --- a/tests/integration/net/order/utils.go +++ b/tests/integration/net/order/utils.go @@ -21,7 +21,6 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" - coreDB "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/net" netutils "github.com/sourcenetwork/defradb/net/utils" testutils "github.com/sourcenetwork/defradb/tests/integration" @@ -81,7 +80,7 @@ func setupDefraNode( ctx := context.Background() log.InfoContext(ctx, "Building new memory store") - db, err := testutils.NewBadgerMemoryDB(ctx, coreDB.WithUpdateEvents()) + db, err := testutils.NewBadgerMemoryDB(ctx) if err != nil { return nil, nil, err } From 598f6ba5503ea7c39c2ed3038cdcc7bc6c37b8fa Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Wed, 22 May 2024 13:54:10 -0400 Subject: [PATCH 14/78] tools(i): Ignore playground node modules in yamllint (#2639) ## Description Ignore node modules under playground that can cause annoying linter errors with yamllint. ## How has this been tested? 
`make lint` Specify the platform(s) on which this was tested: - WSL Manjaro --- tools/configs/yamllint.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/configs/yamllint.yaml b/tools/configs/yamllint.yaml index b7f98b844a..8fbe92f357 100644 --- a/tools/configs/yamllint.yaml +++ b/tools/configs/yamllint.yaml @@ -5,6 +5,9 @@ yaml-files: - '*.yml' - '.yamllint' +ignore: + - playground/node_modules/ + rules: # Disabled lint rules comments: disable From 24ac63dd10e41603e43aec26c951abe31799c51c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 16:46:47 -0400 Subject: [PATCH 15/78] bot: Bump swagger-ui-react from 5.17.10 to 5.17.12 in /playground (#2636) Bumps [swagger-ui-react](https://github.com/swagger-api/swagger-ui) from 5.17.10 to 5.17.12.
Release notes

Sourced from swagger-ui-react's releases.

Swagger UI v5.17.12 Released!

5.17.12 (2024-05-21)

Bug Fixes

  • config: define missing default options and their types (#9949) (04bbeaa), closes #9945
  • release: fix failed v5.17.11 release (a35a71e)
  • swagger-client: fix OpenAPI 3.1.0 dereferencing (#9950) (10e1a5f), closes #9941
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=swagger-ui-react&package-manager=npm_and_yarn&previous-version=5.17.10&new-version=5.17.12)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- playground/package-lock.json | 407 ++++++++++++++--------------------- playground/package.json | 2 +- 2 files changed, 167 insertions(+), 242 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index ed3a9ac500..25aa72a5f4 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -12,7 +12,7 @@ "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.10" + "swagger-ui-react": "^5.17.12" }, "devDependencies": { "@types/react": "^18.3.2", @@ -52,9 +52,9 @@ } }, "node_modules/@braintree/sanitize-url": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.0.1.tgz", - "integrity": "sha512-URg8UM6lfC9ZYqFipItRSxYJdgpU5d2Z4KnjsJ+rj6tgAmGme7E+PQNCiud8g0HDaZKMovu2qjfa0f5Ge0Vlsg==" + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.0.2.tgz", + "integrity": "sha512-NVf/1YycDMs6+FxS0Tb/W8MjJRDQdXF+tBfDtZ5UZeiRUkTmwKc4vmYCKZTyymfJk1gnMsauvZSX/HiV9jOABw==" }, "node_modules/@codemirror/language": { "version": "6.0.0", @@ -1654,9 +1654,9 @@ ] }, "node_modules/@swagger-api/apidom-ast": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-1.0.0-alpha.1.tgz", - "integrity": "sha512-yYkW8OmNbZ1S1U7NA+YiALNMef/4BcJlrZEBZ8Iyqh/Rmty66qFf9/ZIS6RJ5a5OPQdB9Xn7V7WxfYdkrhOyQQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-1.0.0-alpha.3.tgz", + "integrity": "sha512-C2gPpPlfHXoOyFndgKWGrfUbtyv9fUIuRuUiWJ3X5JIt0cN7/6I+DqGKrolQrA4W3JwPxbtl5N0LixTEwFJRsg==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", "@swagger-api/apidom-error": "^1.0.0-alpha.1", @@ -1667,12 +1667,12 @@ } }, "node_modules/@swagger-api/apidom-core": { - 
"version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-1.0.0-alpha.1.tgz", - "integrity": "sha512-zPHqGEcdRvD/xfRlJi367GSZ9VXFv7hoh+Ohado5JU/sA8DtVZEiQ+Vfusk3WBIpvvSVezh5Hxyl6P1bTsCLKw==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-1.0.0-alpha.3.tgz", + "integrity": "sha512-kvACv+NXgMKp5oNdq5RVo7+1b2GVUSnOKRU+SafjnfUHqHgeQw5Fyf+W6iELIdmx0ZzTlQvPRyOLKC15e+rTBg==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^1.0.0-alpha.1", + "@swagger-api/apidom-ast": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", "@types/ramda": "~0.30.0", "minim": "~0.23.8", @@ -1691,12 +1691,12 @@ } }, "node_modules/@swagger-api/apidom-json-pointer": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-1.0.0-alpha.1.tgz", - "integrity": "sha512-Ev8dVTWUCnlS/yOePe4PLz9NdVfyNQB2QGlvtv0zys1AOzoHvxI/xaJCnbroHmHrBkvkyDXwccY2h/LzkMBoVQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-1.0.0-alpha.3.tgz", + "integrity": "sha512-hqNVqjzxY2ZDdIvbffgmjgtOJOijEVy5zyMh5mU8Aq+p5NH4WO3W9oCTCTjIoMWQC/Sg9s1S14UddDU0owCLzQ==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", @@ -1704,15 +1704,15 @@ } }, "node_modules/@swagger-api/apidom-ns-api-design-systems": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-1.0.0-alpha.1.tgz", - "integrity": "sha512-bG16p1dY9WlNfSv4K5IUxILnl7GDiwp6Uoik8QGNpTbkSNW1Xky1DWyehmNUOG/P4A62E2aWuWO60WkJYHscSw==", + "version": "1.0.0-alpha.3", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-1.0.0-alpha.3.tgz", + "integrity": "sha512-tDCmIwyLByn99sjhI8l9A2OMrN7M+W51E5gyvHmd1QORziol5Cp5bOUe6eh6VKVaXuKTWqxJc8H49+Kqq7+o5A==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1720,14 +1720,14 @@ } }, "node_modules/@swagger-api/apidom-ns-asyncapi-2": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-1.0.0-alpha.1.tgz", - "integrity": "sha512-oKp4jY24dKeKY+rVQ76q84zmlcKcBtW+sHT3qx3AC0XZlSQRhrsv2x5/9r/MQoov7LLuGH8T6kI+HPMNPCuzDg==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-1.0.0-alpha.3.tgz", + "integrity": "sha512-H2biAFpzibnl0meeQIItTqnRW9R6icnkjrzdKzqtnHFMxZ5dLAAbwUTB3Ps5au0SnGvVb1/8Vf4UFJQh5Dhe/A==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-json-schema-draft-7": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-json-schema-draft-7": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1735,13 +1735,13 @@ } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-4": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-1.0.0-alpha.1.tgz", - "integrity": "sha512-gKmmTnmf4DGSfI6543Ajcqzf+epVW8ufxLkIMiSC1gUES2N9ncIyZ7VF5WKx3duWYokQ0abSnsIlCBDRYjFEWQ==", + 
"version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-1.0.0-alpha.3.tgz", + "integrity": "sha512-kt0xVFf7FZP0tzxqS9wciwzDNj1veIUVZH1reop8XjSnmnL3osL9UReQm7C/D9NHmU4rcqWnL4Oc4m+AbXOs5A==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^1.0.0-alpha.1", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ast": "^1.0.0-alpha.3", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1749,15 +1749,15 @@ } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-6": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-1.0.0-alpha.1.tgz", - "integrity": "sha512-4QSFBuSJQokozbyvOPrcwV8fL/abBcY+QYaF7d5Ft87M/+9HtUKyfon6WSLbhAFpaP8ZLhwvJl1kHFXfA/HenA==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-1.0.0-alpha.3.tgz", + "integrity": "sha512-aiaw91NCSxzAq0wZqe7z5PILK4dTei7YU6W8Xxu9g95yZ/jyLc9AEaNMPEYXUMxzn/fjbGdfHaOH0um7cLcdRw==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1765,15 +1765,15 @@ } }, "node_modules/@swagger-api/apidom-ns-json-schema-draft-7": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-1.0.0-alpha.1.tgz", - "integrity": 
"sha512-/s9N8a+ronGXsD7uLfvOijnO/qqO5GWM0RYbAol7p8noYWN5ELg8iJScwn7CqjObRZyjMxrRGBSDAK0SquJnMQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-1.0.0-alpha.3.tgz", + "integrity": "sha512-ioq/fTnjGwouGiDs3luav2O0jAWRqDyf5RJbQNYqLfO4sBkDCRCDnWWzBMOmWV1cjwxqxSw+Eh8JsM3G0duNMA==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-json-schema-draft-6": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-6": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1781,15 +1781,15 @@ } }, "node_modules/@swagger-api/apidom-ns-openapi-2": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-1.0.0-alpha.1.tgz", - "integrity": "sha512-dUUFPf2LftBa/FSeRo2Me6HAJVziv0qHq5jX0jqFPTaTiIXaNHkO77W2a3308J5kdsejv7S/N8rbujdXFp+MoQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-1.0.0-alpha.3.tgz", + "integrity": "sha512-SKXglSEFQESs4QAR1+39GSFzCz5Mt8Bw0on0oEFt8ltOsaLeHlZZaX0eKbJXr8c63Y0zbd5KgONadYBBLsuwJg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1797,14 +1797,14 @@ } }, "node_modules/@swagger-api/apidom-ns-openapi-3-0": { - "version": "1.0.0-alpha.1", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-1.0.0-alpha.1.tgz", - "integrity": "sha512-Be1XDoy6YSyZWCuNDf5y6iYtOt40A/KWI57TEy+0Eao/SLbaupzTJmErtk96bf4/DhoRvlKiAgQkKt+9bqDZ9w==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-1.0.0-alpha.3.tgz", + "integrity": "sha512-Km9FlwayXGAn0402D4YAeJWFO1kAhP/eCs9TgfhAL4Ak1NiQLSDIs+DfI3EAOgROBoUEE2v2Ki/P6yQAFuIgDg==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.1", + "@swagger-api/apidom-ns-json-schema-draft-4": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1812,14 +1812,14 @@ } }, "node_modules/@swagger-api/apidom-ns-openapi-3-1": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-1.0.0-alpha.1.tgz", - "integrity": "sha512-u87HFtYCtrqBthRp3y2a5YdCmiVTD7v8hv2hn6lGcUIjBB/1anqBejVbcWZa3tmknuUG+yIHROapq8HxKPkdcw==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-1.0.0-alpha.3.tgz", + "integrity": "sha512-Cl0t+z+ylCPdqGpe8uJslh/DUj3d0oTwlZ0nxUQn1Gocaa5OodZqwyL5NiQj83PLec7MsfHnpfhSwaZIDd+3sA==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^1.0.0-alpha.1", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.1", + "@swagger-api/apidom-ast": "^1.0.0-alpha.3", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1827,14 +1827,14 @@ } }, 
"node_modules/@swagger-api/apidom-ns-workflows-1": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-1.0.0-alpha.1.tgz", - "integrity": "sha512-zMSXjWKtmHk+zl/tS3m/PCDJGh6+Gr9revPtxA0OAYvhmKTVhLNX4H8WtP4J+EGAUyjZKB7gussJUodqNR25uQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-workflows-1/-/apidom-ns-workflows-1-1.0.0-alpha.3.tgz", + "integrity": "sha512-wdImblzbHOwjuchg15XdZXHhXxoOxWgxwf0NV0qgPEyvhuxvHwpL0tuAYvxiRllPFHPP48dI8aszbFjNOXk1kg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0", @@ -1842,74 +1842,74 @@ } }, "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-json": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-1.0.0-alpha.1.tgz", - "integrity": "sha512-JbLcDtB9o/fblyKfYKJ+F2jVdcTPAvdbv1094qk9GCPl1JnU7A9SpkZKDdIF1WyUnJmDATUnSsDEib8gRfeGZw==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-1.0.0-alpha.3.tgz", + "integrity": "sha512-E7uQ+g+TxhE6/IM4sv/zD+341HwnWUNk/jqqDNJtPzjfNAUXw9kBOIrU08cJIIAH8wSr/jNwOFLySt2CsqE1eQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-api-design-systems": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + 
"@swagger-api/apidom-ns-api-design-systems": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-yaml": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-1.0.0-alpha.1.tgz", - "integrity": "sha512-T24Dq4qa/cngkfxUZ6eWULHjEscLutUTO6ltxnKDvyBlxkKURYw8FGBWRn4TSAmk9iK+UVrVg1NoFudtpfN6cQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-1.0.0-alpha.3.tgz", + "integrity": "sha512-Im83uYqh6vIa4xl7BCcgWlRphIyzo4UFU/BX7jE0XKCNeDPemgkRhxjjyx/vOi6ySZI81bAO1UuGb/joE3uA2w==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-api-design-systems": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-api-design-systems": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-json-2": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-1.0.0-alpha.1.tgz", - "integrity": "sha512-gkpDw0+pf3B7MXxjk1nIo7WOKm/t9UvG4MGxDr4fB797v9Rt9fPDv2sMgsS/PFPqsa2zRhTrp2CPOKHCiOzW+A==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-1.0.0-alpha.3.tgz", + "integrity": 
"sha512-tR8nILhc6n8gGyYXr88EaaQXs0gnOD2/2acQg1XS+WKWP4CX+/qsp7mUSC4DEkHrkbpWbbgBSvrK/Stfuzi6AQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-asyncapi-2": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-asyncapi-2": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-1.0.0-alpha.1.tgz", - "integrity": "sha512-1N2gF6qympDdIXaoCvT+B9P9yghYuGOyAWF91sN5kQLd6VtBlZi+jTnCPLxAd+rtl3h5WIIQyNed6qC/C884Mg==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-1.0.0-alpha.3.tgz", + "integrity": "sha512-6ffIwxLJTE24/6xhVR7F0DRPfrJOIIcXDa8Qfj3iGLE4sbI97WmXBaSwY0k55lwgndVjr1V2Iu4BTuGJpu74Ag==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-asyncapi-2": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-asyncapi-2": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-json": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-1.0.0-alpha.1.tgz", - "integrity": 
"sha512-xNiA3OKGFdf8cHXsVfM2WdOhGDj838XjhXKjKEAbPK+LVe83/QUNRSSL0nxnr0Z6VNJG1J/5y/65Horf3fapow==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-1.0.0-alpha.3.tgz", + "integrity": "sha512-GMGHsJzCI0x+40CMh9wQh+zft3T17CDts/ijYWpllPMZDqZ+1rejSvtwZ2/5vUoDATXue/lWBNnNR0DtxgC0EQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^1.0.0-alpha.1", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ast": "^1.0.0-alpha.3", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", @@ -1920,134 +1920,134 @@ } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-2": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-1.0.0-alpha.1.tgz", - "integrity": "sha512-mbv+qcCRV6JpVOW2GEt0JjyeRD+lgbXmF5pcalDyr/+1Iuol8v3XLbwLHyEjR7FxMXj/DSjjfytnSKX1C+PwYA==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-1.0.0-alpha.3.tgz", + "integrity": "sha512-RphzjaKtb9JwQPJjg014lreEyBNxEOddhTD7nsgPoloYFXglFk+zSoidMCh9J1v1WsirRksuvk0BNhnDupma5A==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-2": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-2": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-0": { - "version": 
"1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-1.0.0-alpha.1.tgz", - "integrity": "sha512-aJIANKczHzVKUNihytvfVJFpUGiATWsiKtMgLxShx+i3JeN/DfkRGOBM4346mldjcEcUBA2zS8UDGvDGRz6oVQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-1.0.0-alpha.3.tgz", + "integrity": "sha512-zRLQr0XO99HiVzEEJvB1xYqYl6CaNv5O5y5scDg+I1Vlge3gpQOBaCOZdxkGibmHGSkDxV/pCyjN+YptUlnq3Q==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-1": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-1.0.0-alpha.1.tgz", - "integrity": "sha512-QHcx9KltTmS0qEiLP2in391sQJDm7OYT9IFRH/Iy5mde2F7WNcQqY1D8o/YklDKvnkquHRIytDNx/IEjuYwwHQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-1.0.0-alpha.3.tgz", + "integrity": "sha512-paky+VbtVg+tSWfH0Sa+ej0nOkBX/MmuI48i33hIBDP9opbh05e6Rn0rHRdVCx53XdBQNKGjzGYY50Q6cpwC9g==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + 
"@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-2": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-1.0.0-alpha.1.tgz", - "integrity": "sha512-gGlz/DV+uENk2KI3YRUbkw93Co/K47vMUIW+jFJ9BuiHJZ34LnylMGtxR/J+4o+4L1WQa3o/czg7NrYG5Xe9pQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-1.0.0-alpha.3.tgz", + "integrity": "sha512-2EkWW33Kv6xxsQTa6trSAtnrQHEkdLOrP4IapARXmcdzS02+NSUjE8wT2wfPsDxGAcv856cCmTvEg6Ea/sZWpQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-2": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-2": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-1.0.0-alpha.1.tgz", - "integrity": "sha512-oOo6CybNEsTwxMsSUE9xjCoyw9H0MMMPGFxAatFbwxDlqyw32CvN3ydXXaQmu4TauhNDmplJLHtRlceKDzl7OQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-1.0.0-alpha.3.tgz", + "integrity": 
"sha512-YKYEImUrp5/ta6X70qUj1NX0DY0Bx7dOqGIY/q/9FZHq3OCsfTHznlF8H4E8B6BAo+u4xyyekvuvql3shQp1hQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-3-0": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-1.0.0-alpha.1.tgz", - "integrity": "sha512-ePkENiU3HlYNOULgghjQr47UeNo8hXfI+mH7Iw25XGC8VHwt5X4PpXO63kcNu1pLsdscpLPyzVCTz9J3CxMmxg==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-1.0.0-alpha.3.tgz", + "integrity": "sha512-X0uotMmUkYWRBPxzpNNZ1288yt3Xt8uXgMrwm/hXAHlUxnRtrWgplPRtlaYzSu2LnRQIluJWYzj8wTmZtGGjBQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-openapi-3-1": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-json-1": { - "version": "1.0.0-alpha.1", - "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-1.0.0-alpha.1.tgz", - "integrity": "sha512-kM2Vmu13eEi+7nZLvLuNF3frZV8nSibD790sqDbtSQOym+cxGB/+iQ9PMeEIddPka7l4to4vDM5HaLW6EKGKAQ==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-json-1/-/apidom-parser-adapter-workflows-json-1-1.0.0-alpha.3.tgz", + "integrity": "sha512-7zzCK9V0qf7Jd/wUPXSzqENInhbaCbtV+JDB+j6YvqGAN6BR8OjJnGxNQ/du+BUOtaQPEPjBwWXf7gMqmurtEQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-workflows-1": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", + "@swagger-api/apidom-ns-workflows-1": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-json": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-workflows-yaml-1": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-1.0.0-alpha.1.tgz", - "integrity": "sha512-3arbAoEiEQzz+YaDP6KX768GA0d3CbF8TxtpCnW2U0T2n8qou/kqSYUW2a58EoFDUhEsYyrB6+adwP9H5TSfdA==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-workflows-yaml-1/-/apidom-parser-adapter-workflows-yaml-1-1.0.0-alpha.3.tgz", + "integrity": "sha512-5pIZ4sDFDaMC6LtdQgw0g2quS2dNY5A4M7XJHRpFg+7EmBBiljFsUlfNBfCY1ektdVEIkxp4smDHF9lfS1p7mg==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", - "@swagger-api/apidom-ns-workflows-1": "^1.0.0-alpha.1", - "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": 
"^1.0.0-alpha.3", + "@swagger-api/apidom-ns-workflows-1": "^1.0.0-alpha.3", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", "ramda-adjunct": "^5.0.0" } }, "node_modules/@swagger-api/apidom-parser-adapter-yaml-1-2": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-1.0.0-alpha.1.tgz", - "integrity": "sha512-sDlW8XV4Q/MSJOr9aw9UwCoSAEyy4FFwi3IGqyLlgXWrh9ViaadwhCFLWxtn/stGyubXo29li+5taPuE3ETrqw==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-1.0.0-alpha.3.tgz", + "integrity": "sha512-h5qFWSa3HPZxUBDYU8eFnTB+1fGF70kwqkHfhOkx7LDeFXIdip0j44mkQPSTDIy9i6F7Nn91AaXHTJJMeEEIZQ==", "optional": true, "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-ast": "^1.0.0-alpha.1", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-ast": "^1.0.0-alpha.3", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@swagger-api/apidom-error": "^1.0.0-alpha.1", "@types/ramda": "~0.30.0", "ramda": "~0.30.0", @@ -2058,12 +2058,12 @@ } }, "node_modules/@swagger-api/apidom-reference": { - "version": "1.0.0-alpha.1", - "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-1.0.0-alpha.1.tgz", - "integrity": "sha512-iK8dyU3YsR23UuAHOlCB9OD9vKKsokyx0QGjYZpUP3EHu2gkTnn7m/NDuMpIC8MRHYlQNt42VKWZjQyC3z1nbw==", + "version": "1.0.0-alpha.3", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-1.0.0-alpha.3.tgz", + "integrity": "sha512-ir3QbAE8j7+9e0he381O0l3g4DkGDSKQELu6mKPC+W/w0SkG0gJQBqBq9KCltoAURwMpi94MoUWTFq1UfxgyIQ==", "dependencies": { "@babel/runtime-corejs3": "^7.20.7", - "@swagger-api/apidom-core": "^1.0.0-alpha.1", + "@swagger-api/apidom-core": "^1.0.0-alpha.3", "@types/ramda": "~0.30.0", "axios": "^1.4.0", 
"minimatch": "^7.4.3", @@ -2611,23 +2611,6 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.8.0.tgz", - "integrity": "sha512-viEmZ1LmwsGcnr85gIq+FCYI7nO90DVbE37/ll51hjv9aG+YZMb4WDE2fyWpUR4O/UrhGRpYXK/XajcGTk2B8g==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.8.0", - "@typescript-eslint/visitor-keys": "7.8.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/type-utils": { "version": "7.9.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.9.0.tgz", @@ -2713,47 +2696,6 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/types": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.8.0.tgz", - "integrity": "sha512-wf0peJ+ZGlcH+2ZS23aJbOv+ztjeeP8uQ9GgwMJGVLx/Nj9CJt17GWgWWoSmoRVKAX2X+7fzEnAjxdvK2gqCLw==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.8.0.tgz", - "integrity": "sha512-5pfUCOwK5yjPaJQNy44prjCwtr981dO8Qo9J9PwYXZ0MosgAbfEMB008dJ5sNo3+/BN6ytBPuSvXUg9SAqB0dg==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.8.0", - "@typescript-eslint/visitor-keys": "7.8.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - 
"funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, "node_modules/@typescript-eslint/utils": { "version": "7.9.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.9.0.tgz", @@ -2851,23 +2793,6 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.8.0.tgz", - "integrity": "sha512-q4/gibTNBQNA0lGyYQCmWRS5D15n8rXh4QjK3KV+MBPlTYHpfBUT3D3PaPR/HeNiI9W6R7FvlkcGhNyAoP+caA==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.8.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@ungap/structured-clone": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", @@ -2991,9 +2916,9 @@ } }, "node_modules/axios": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.0.tgz", - "integrity": "sha512-IiB0wQeKyPRdsFVhBgIo31FbzOyf2M6wYl7/NVutFwFBRMiAbjNiydJIHKeLmPugF4kJLfA1uWZ82Is2QzqqFA==", + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz", + "integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==", "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.0", @@ -3412,9 +3337,9 @@ } }, "node_modules/dompurify": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.1.2.tgz", - "integrity": "sha512-hLGGBI1tw5N8qTELr3blKjAML/LY4ANxksbS612UiJyDfyf/2D092Pvm+S7pmeTGJRqvlJkFzBoHBQKgQlOQVg==" + "version": "3.1.4", + "resolved": 
"https://registry.npmjs.org/dompurify/-/dompurify-3.1.4.tgz", + "integrity": "sha512-2gnshi6OshmuKil8rMZuQCGiUF3cUxHY3NGDzUAdUx/NPEe5DVnO8BDoAQouvgwnx0R/+a6jUn36Z0FSdq8vww==" }, "node_modules/drange": { "version": "1.1.1", @@ -5859,16 +5784,16 @@ } }, "node_modules/swagger-client": { - "version": "3.28.0", - "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.28.0.tgz", - "integrity": "sha512-uEi5wm30241FU4ngFJQzrHuWGPMgbbg6foGiAVuR8S8wS/Iwp2f7vbaTWJOpHwqlxedtg2WX/+PSb/BwVnw/Kw==", + "version": "3.28.1", + "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.28.1.tgz", + "integrity": "sha512-tt3/54GTImgOLrjzl83FZ+koJ7Kq6uuyBNS7mTpZeUQsBi2a/4IvqPcfY2qKhf7CFrbv6lzPm+MmSudrxU8J5g==", "dependencies": { "@babel/runtime-corejs3": "^7.22.15", - "@swagger-api/apidom-core": ">=1.0.0-alpha.1 <1.0.0-beta.0", + "@swagger-api/apidom-core": ">=1.0.0-alpha.3 <1.0.0-beta.0", "@swagger-api/apidom-error": ">=1.0.0-alpha.1 <1.0.0-beta.0", - "@swagger-api/apidom-json-pointer": ">=1.0.0-alpha.1 <1.0.0-beta.0", - "@swagger-api/apidom-ns-openapi-3-1": ">=1.0.0-alpha.1 <1.0.0-beta.0", - "@swagger-api/apidom-reference": ">=1.0.0-alpha.1 <1.0.0-beta.0", + "@swagger-api/apidom-json-pointer": ">=1.0.0-alpha.3 <1.0.0-beta.0", + "@swagger-api/apidom-ns-openapi-3-1": ">=1.0.0-alpha.3 <1.0.0-beta.0", + "@swagger-api/apidom-reference": ">=1.0.0-alpha.3 <1.0.0-beta.0", "cookie": "~0.6.0", "deepmerge": "~4.3.0", "fast-json-patch": "^3.0.0-1", @@ -5891,17 +5816,17 @@ } }, "node_modules/swagger-ui-react": { - "version": "5.17.10", - "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.10.tgz", - "integrity": "sha512-KQH0d3AB256RKCpOA7Zkc2cYIFLIt3nFGJSKIgAkI4PXxlFmru504GNDH0PgWFbitROS9TCki94yZs1a+sJ0Lg==", + "version": "5.17.12", + "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.12.tgz", + "integrity": "sha512-qkDBOx9c3v1m8LyUgyc+Idz8UXLmz7RMDYX0Xlry0kwBQYxkw6AXfQ1bemgkna1sRQCvASmucdm2TYAdx6nlaA==", 
"dependencies": { "@babel/runtime-corejs3": "^7.24.5", - "@braintree/sanitize-url": "=7.0.1", + "@braintree/sanitize-url": "=7.0.2", "base64-js": "^1.5.1", "classnames": "^2.5.1", "css.escape": "1.5.1", "deep-extend": "0.6.0", - "dompurify": "=3.1.2", + "dompurify": "=3.1.4", "ieee754": "^1.2.1", "immutable": "^3.x.x", "js-file-download": "^0.4.12", @@ -5923,7 +5848,7 @@ "reselect": "^5.1.0", "serialize-error": "^8.1.0", "sha.js": "^2.4.11", - "swagger-client": "^3.27.9", + "swagger-client": "^3.28.1", "url-parse": "^1.5.10", "xml": "=1.0.1", "xml-but-prettier": "^1.0.1", diff --git a/playground/package.json b/playground/package.json index 0300c781e6..6dbfb27af3 100644 --- a/playground/package.json +++ b/playground/package.json @@ -14,7 +14,7 @@ "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.10" + "swagger-ui-react": "^5.17.12" }, "devDependencies": { "@types/react": "^18.3.2", From 945095a9d53ff37bf947f0e4339f51260a3f8296 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 17:54:15 -0400 Subject: [PATCH 16/78] bot: Bump @typescript-eslint/eslint-plugin from 7.9.0 to 7.10.0 in /playground (#2637) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/eslint-plugin](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/eslint-plugin) from 7.9.0 to 7.10.0.
Release notes

Sourced from @​typescript-eslint/eslint-plugin's releases.

v7.10.0

7.10.0 (2024-05-20)

🚀 Features

  • eslint-plugin: [sort-type-constituents] support case sensitive sorting (#8760)

🩹 Fixes

  • eslint-plugin: [prefer-regexp-exec] fix heuristic to check whether regex may contain global flag (#8764)
  • typescript-estree: don't add in-project files to defaultProjectMatchedFiles (#9097)
  • utils: remove function form type from flat config files and ignores (#9111)

❤️ Thank You

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @​typescript-eslint/eslint-plugin's changelog.

7.10.0 (2024-05-20)

🚀 Features

  • eslint-plugin: [sort-type-constituents] support case sensitive sorting

🩹 Fixes

  • eslint-plugin: [prefer-regexp-exec] fix heuristic to check whether regex may contain global flag

❤️ Thank You

  • auvred
  • Emanuel Hoogeveen
  • jsfm01
  • Kirk Waiblinger

You can read about our versioning strategy and releases on our website.

Commits
  • c18226e chore(release): publish 7.10.0
  • 8d92ba8 docs: [no-floating-promises] fix capitalization typo (#9118)
  • d951d83 fix(eslint-plugin): [prefer-regexp-exec] fix heuristic to check whether regex...
  • 987a96e feat(eslint-plugin): [sort-type-constituents] support case sensitive sorting ...
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/eslint-plugin&package-manager=npm_and_yarn&previous-version=7.9.0&new-version=7.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 207 +++++++++-------------------------- playground/package.json | 2 +- 2 files changed, 52 insertions(+), 157 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 25aa72a5f4..d60b669d28 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -18,7 +18,7 @@ "@types/react": "^18.3.2", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.9.0", + "@typescript-eslint/eslint-plugin": "^7.10.0", "@typescript-eslint/parser": "^7.9.0", "@vitejs/plugin-react-swc": "^3.6.0", "eslint": "^8.57.0", @@ -2429,16 +2429,16 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.9.0.tgz", - "integrity": "sha512-6e+X0X3sFe/G/54aC3jt0txuMTURqLyekmEHViqyA2VnxhLMpvA6nqmcjIy+Cr9tLDHPssA74BP5Mx9HQIxBEA==", + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.10.0.tgz", + "integrity": "sha512-PzCr+a/KAef5ZawX7nbyNwBDtM1HdLIT53aSA2DDlxmxMngZ43O8SIePOeX8H5S+FHXeI6t97mTt/dDdzY4Fyw==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.9.0", - "@typescript-eslint/type-utils": "7.9.0", - "@typescript-eslint/utils": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0", + "@typescript-eslint/scope-manager": "7.10.0", + "@typescript-eslint/type-utils": "7.10.0", + "@typescript-eslint/utils": "7.10.0", + "@typescript-eslint/visitor-keys": "7.10.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2461,53 +2461,6 @@ } } }, - 
"node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.9.0.tgz", - "integrity": "sha512-ZwPK4DeCDxr3GJltRz5iZejPFAAr4Wk3+2WIBaj1L5PYK5RgxExu/Y68FFVclN0y6GGwH8q+KgKRCvaTmFBbgQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", - "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", - "integrity": "sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/parser": { "version": "7.9.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.9.0.tgz", @@ -2611,14 +2564,31 @@ "url": "https://opencollective.com/typescript-eslint" } }, + 
"node_modules/@typescript-eslint/scope-manager": { + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.10.0.tgz", + "integrity": "sha512-7L01/K8W/VGl7noe2mgH0K7BE29Sq6KAbVmxurj8GGaPDZXPr8EEQ2seOeAS+mEV9DnzxBQB6ax6qQQ5C6P4xg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.10.0", + "@typescript-eslint/visitor-keys": "7.10.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/type-utils": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.9.0.tgz", - "integrity": "sha512-6Qy8dfut0PFrFRAZsGzuLoM4hre4gjzWJB6sUvdunCYZsYemTkzZNwF1rnGea326PHPT3zn5Lmg32M/xfJfByA==", + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.10.0.tgz", + "integrity": "sha512-D7tS4WDkJWrVkuzgm90qYw9RdgBcrWmbbRkrLA4d7Pg3w0ttVGDsvYGV19SH8gPR5L7OtcN5J1hTtyenO9xE9g==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "7.9.0", - "@typescript-eslint/utils": "7.9.0", + "@typescript-eslint/typescript-estree": "7.10.0", + "@typescript-eslint/utils": "7.10.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2638,10 +2608,10 @@ } } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", - "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", + "node_modules/@typescript-eslint/types": { + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.10.0.tgz", + "integrity": "sha512-7fNj+Ya35aNyhuqrA1E/VayQX9Elwr8NKZ4WueClR3KwJ7Xx9jcCdOrLW04h51de/+gNbyFMs+IDxh5xIwfbNg==", "dev": true, "engines": { 
"node": "^18.18.0 || >=20.0.0" @@ -2651,14 +2621,14 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.9.0.tgz", - "integrity": "sha512-zBCMCkrb2YjpKV3LA0ZJubtKCDxLttxfdGmwZvTqqWevUPN0FZvSI26FalGFFUZU/9YQK/A4xcQF9o/VVaCKAg==", + "node_modules/@typescript-eslint/typescript-estree": { + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.10.0.tgz", + "integrity": "sha512-LXFnQJjL9XIcxeVfqmNj60YhatpRLt6UhdlFwAkjNc6jSUlK8zQOl1oktAP8PlWFzPQC1jny/8Bai3/HPuvN5g==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0", + "@typescript-eslint/types": "7.10.0", + "@typescript-eslint/visitor-keys": "7.10.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -2679,33 +2649,16 @@ } } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", - "integrity": "sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/utils": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.9.0.tgz", - "integrity": "sha512-5KVRQCzZajmT4Ep+NEgjXCvjuypVvYHUW7RHlXzNPuak2oWpVoD1jf5xCP0dPAuNIchjC7uQyvbdaSTFaLqSdA==", + "version": "7.10.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.10.0.tgz", + "integrity": "sha512-olzif1Fuo8R8m/qKkzJqT7qwy16CzPRWBvERS0uvyc+DHd8AKbO4Jb7kpAvVzMmZm8TrHnI7hvjN4I05zow+tg==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.9.0", - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/typescript-estree": "7.9.0" + "@typescript-eslint/scope-manager": "7.10.0", + "@typescript-eslint/types": "7.10.0", + "@typescript-eslint/typescript-estree": "7.10.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2718,71 +2671,13 @@ "eslint": "^8.56.0" } }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.9.0.tgz", - "integrity": "sha512-ZwPK4DeCDxr3GJltRz5iZejPFAAr4Wk3+2WIBaj1L5PYK5RgxExu/Y68FFVclN0y6GGwH8q+KgKRCvaTmFBbgQ==", + "node_modules/@typescript-eslint/visitor-keys": { + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.10.0.tgz", + "integrity": "sha512-9ntIVgsi6gg6FIq9xjEO4VQJvwOqA3jaBFQJ/6TK5AvEup2+cECI6Fh7QiBxmfMHXU0V0J4RyPeOU1VDNzl9cg==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", - "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.9.0.tgz", - "integrity": "sha512-zBCMCkrb2YjpKV3LA0ZJubtKCDxLttxfdGmwZvTqqWevUPN0FZvSI26FalGFFUZU/9YQK/A4xcQF9o/VVaCKAg==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", - "integrity": "sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", + "@typescript-eslint/types": "7.10.0", "eslint-visitor-keys": "^3.4.3" }, "engines": { diff --git a/playground/package.json b/playground/package.json index 6dbfb27af3..8a3e135066 100644 --- a/playground/package.json +++ b/playground/package.json @@ -20,7 +20,7 @@ "@types/react": "^18.3.2", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.9.0", + "@typescript-eslint/eslint-plugin": "^7.10.0", "@typescript-eslint/parser": "^7.9.0", "@vitejs/plugin-react-swc": "^3.6.0", "eslint": "^8.57.0", From ba30d9b761643b1c80f6cf00433689fd36b33cc0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 May 2024 07:19:01 -0400 Subject: [PATCH 17/78] bot: Bump @typescript-eslint/parser from 7.9.0 to 7.10.0 in /playground (#2635) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/parser](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/parser) from 7.9.0 to 7.10.0.
Release notes

Sourced from @​typescript-eslint/parser's releases.

v7.10.0

7.10.0 (2024-05-20)

🚀 Features

  • eslint-plugin: [sort-type-constituents] support case sensitive sorting (#8760)

🩹 Fixes

  • eslint-plugin: [prefer-regexp-exec] fix heuristic to check whether regex may contain global flag (#8764)
  • typescript-estree: don't add in-project files to defaultProjectMatchedFiles (#9097)
  • utils: remove function form type from flat config files and ignores (#9111)

❤️ Thank You

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @​typescript-eslint/parser's changelog.

7.10.0 (2024-05-20)

This was a version bump only for parser to align it with other projects, there were no code changes.

You can read about our versioning strategy and releases on our website.

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/parser&package-manager=npm_and_yarn&previous-version=7.9.0&new-version=7.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 91 ++++-------------------------------- playground/package.json | 2 +- 2 files changed, 9 insertions(+), 84 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index d60b669d28..d8a5fcccac 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -19,7 +19,7 @@ "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.10.0", - "@typescript-eslint/parser": "^7.9.0", + "@typescript-eslint/parser": "^7.10.0", "@vitejs/plugin-react-swc": "^3.6.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", @@ -2462,15 +2462,15 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.9.0.tgz", - "integrity": "sha512-qHMJfkL5qvgQB2aLvhUSXxbK7OLnDkwPzFalg458pxQgfxKDfT1ZDbHQM/I6mDIf/svlMkj21kzKuQ2ixJlatQ==", + "version": "7.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.10.0.tgz", + "integrity": "sha512-2EjZMA0LUW5V5tGQiaa2Gys+nKdfrn2xiTIBLR4fxmPmVSvgPcKNW+AE/ln9k0A4zDUti0J/GZXMDupQoI+e1w==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "7.9.0", - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/typescript-estree": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0", + "@typescript-eslint/scope-manager": "7.10.0", + "@typescript-eslint/types": "7.10.0", + "@typescript-eslint/typescript-estree": "7.10.0", + "@typescript-eslint/visitor-keys": "7.10.0", "debug": "^4.3.4" }, "engines": { @@ -2489,81 +2489,6 @@ } } }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.9.0.tgz", - "integrity": 
"sha512-ZwPK4DeCDxr3GJltRz5iZejPFAAr4Wk3+2WIBaj1L5PYK5RgxExu/Y68FFVclN0y6GGwH8q+KgKRCvaTmFBbgQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.9.0.tgz", - "integrity": "sha512-oZQD9HEWQanl9UfsbGVcZ2cGaR0YT5476xfWE0oE5kQa2sNK2frxOlkeacLOTh9po4AlUT5rtkGyYM5kew0z5w==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.9.0.tgz", - "integrity": "sha512-zBCMCkrb2YjpKV3LA0ZJubtKCDxLttxfdGmwZvTqqWevUPN0FZvSI26FalGFFUZU/9YQK/A4xcQF9o/VVaCKAg==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", - "@typescript-eslint/visitor-keys": "7.9.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.9.0.tgz", - "integrity": 
"sha512-iESPx2TNLDNGQLyjKhUvIKprlP49XNEK+MvIf9nIO7ZZaZdbnfWKHnXAgufpxqfA0YryH8XToi4+CjBgVnFTSQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.9.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/scope-manager": { "version": "7.10.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.10.0.tgz", diff --git a/playground/package.json b/playground/package.json index 8a3e135066..41ba60950f 100644 --- a/playground/package.json +++ b/playground/package.json @@ -21,7 +21,7 @@ "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.10.0", - "@typescript-eslint/parser": "^7.9.0", + "@typescript-eslint/parser": "^7.10.0", "@vitejs/plugin-react-swc": "^3.6.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", From 3c8871ae6500aedffd4b382bb6631b2544577f40 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Thu, 23 May 2024 14:27:31 -0400 Subject: [PATCH 18/78] test(i): Don't setup http and p2p by default in test suite (#2644) ## Relevant issue(s) Resolves #2643 ## Description Don't setup http and p2p by default in test suite. This is a regression I introduced in https://github.com/sourcenetwork/defradb/pull/2641 (sorry for the bother) - we set p2p and http up later. As the new, unused, P2P and http servers werent cleaned up at the end of the test, this had quite a bad effect on test suite memory consumption. Later, I think the surrounding code that sets up p2p and http could probably be changed later to make better use of `node.New`, but IMO this is good enough for now (we have other things to do). 
--- tests/integration/db.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/integration/db.go b/tests/integration/db.go index 329fdeffdd..d8dedbc07c 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -109,7 +109,14 @@ func setupDatabase(s *state) (client.DB, string, error) { } storeOpts := []node.StoreOpt{} acpOpts := []node.ACPOpt{} - opts := []node.NodeOpt{} + opts := []node.NodeOpt{ + // The test framework sets this up elsewhere when required so that it may be wrapped + // into a [client.DB]. + node.WithDisableAPI(true), + // The p2p is configured in the tests by [ConfigureNode] actions, we disable it here + // to keep the tests as lightweight as possible. + node.WithDisableP2P(true), + } if badgerEncryption && encryptionKey == nil { key, err := crypto.GenerateAES256() From dbc82191396dc66e76c4d2fa191ab3e769948e3b Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Thu, 23 May 2024 15:46:54 -0400 Subject: [PATCH 19/78] docs: Streamline cli documentation (#2646) ## Relevant issue(s) Resolves #2166 Resovles #2645 ## Description - Delete old cli dir - Update make command to generate in the new website dir - Actually update the documents in the new directory. ## How has this been tested? 
`make docs:cli` Specify the platform(s) on which this was tested: - WSL2 --- Makefile | 3 +- docs/cli/defradb.md | 38 ------------ docs/cli/defradb_client.md | 49 --------------- docs/cli/defradb_client_dump.md | 38 ------------ docs/cli/defradb_client_query.md | 60 ------------------- docs/cli/defradb_client_schema.md | 43 ------------- docs/cli/defradb_client_schema_add.md | 59 ------------------ docs/cli/defradb_client_schema_patch.md | 58 ------------------ docs/cli/defradb_server-dump.md | 36 ----------- docs/cli/defradb_start.md | 49 --------------- docs/cli/defradb_version.md | 38 ------------ docs/website/references/cli/defradb.md | 40 +++++++------ docs/website/references/cli/defradb_client.md | 56 ++++++++++------- .../references}/cli/defradb_client_acp.md | 0 .../cli/defradb_client_acp_policy.md | 0 .../cli/defradb_client_acp_policy_add.md | 0 .../references}/cli/defradb_client_backup.md | 0 .../cli/defradb_client_backup_export.md | 0 .../cli/defradb_client_backup_import.md | 0 .../references/cli/defradb_client_blocks.md | 28 --------- .../cli/defradb_client_blocks_get.md | 31 ---------- .../cli/defradb_client_collection.md | 0 .../cli/defradb_client_collection_create.md | 0 .../cli/defradb_client_collection_delete.md | 0 .../cli/defradb_client_collection_describe.md | 0 .../cli/defradb_client_collection_docIDs.md | 0 .../cli/defradb_client_collection_get.md | 0 .../cli/defradb_client_collection_patch.md | 0 .../cli/defradb_client_collection_update.md | 0 .../references/cli/defradb_client_dump.md | 35 ++++++----- .../references}/cli/defradb_client_index.md | 0 .../cli/defradb_client_index_create.md | 0 .../cli/defradb_client_index_drop.md | 0 .../cli/defradb_client_index_list.md | 0 .../references}/cli/defradb_client_p2p.md | 0 .../cli/defradb_client_p2p_collection.md | 0 .../cli/defradb_client_p2p_collection_add.md | 0 .../defradb_client_p2p_collection_getall.md | 0 .../defradb_client_p2p_collection_remove.md | 0 .../cli/defradb_client_p2p_info.md | 0 
.../cli/defradb_client_p2p_replicator.md | 0 .../defradb_client_p2p_replicator_delete.md | 0 .../defradb_client_p2p_replicator_getall.md | 0 .../cli/defradb_client_p2p_replicator_set.md | 0 .../references/cli/defradb_client_peerid.md | 31 ---------- .../references/cli/defradb_client_ping.md | 31 ---------- .../references/cli/defradb_client_query.md | 50 ++++++++++------ .../references/cli/defradb_client_rpc.md | 34 ----------- .../cli/defradb_client_rpc_addreplicator.md | 37 ------------ .../cli/defradb_client_rpc_p2pcollection.md | 35 ----------- .../defradb_client_rpc_p2pcollection_add.md | 36 ----------- ...defradb_client_rpc_p2pcollection_getall.md | 36 ----------- ...defradb_client_rpc_p2pcollection_remove.md | 36 ----------- .../cli/defradb_client_rpc_replicator.md | 37 ------------ .../defradb_client_rpc_replicator_delete.md | 37 ------------ .../defradb_client_rpc_replicator_getall.md | 37 ------------ .../cli/defradb_client_rpc_replicator_set.md | 39 ------------ .../references/cli/defradb_client_schema.md | 44 ++++++++------ .../cli/defradb_client_schema_add.md | 46 ++++++++------ .../cli/defradb_client_schema_describe.md | 0 .../cli/defradb_client_schema_migration.md | 0 .../defradb_client_schema_migration_down.md | 0 .../defradb_client_schema_migration_reload.md | 0 ...db_client_schema_migration_set-registry.md | 0 .../defradb_client_schema_migration_set.md | 0 .../cli/defradb_client_schema_migration_up.md | 0 .../cli/defradb_client_schema_patch.md | 49 ++++++++------- .../cli/defradb_client_schema_set-active.md | 0 .../references}/cli/defradb_client_tx.md | 0 .../cli/defradb_client_tx_commit.md | 0 .../cli/defradb_client_tx_create.md | 0 .../cli/defradb_client_tx_discard.md | 0 .../references}/cli/defradb_client_view.md | 0 .../cli/defradb_client_view_add.md | 0 docs/website/references/cli/defradb_init.md | 36 ----------- .../references}/cli/defradb_keyring.md | 0 .../references}/cli/defradb_keyring_export.md | 0 .../cli/defradb_keyring_generate.md | 0 
.../references}/cli/defradb_keyring_import.md | 0 .../references/cli/defradb_server-dump.md | 32 +++++----- docs/website/references/cli/defradb_start.md | 55 +++++++++-------- .../website/references/cli/defradb_version.md | 29 +++++---- 82 files changed, 258 insertions(+), 1170 deletions(-) delete mode 100644 docs/cli/defradb.md delete mode 100644 docs/cli/defradb_client.md delete mode 100644 docs/cli/defradb_client_dump.md delete mode 100644 docs/cli/defradb_client_query.md delete mode 100644 docs/cli/defradb_client_schema.md delete mode 100644 docs/cli/defradb_client_schema_add.md delete mode 100644 docs/cli/defradb_client_schema_patch.md delete mode 100644 docs/cli/defradb_server-dump.md delete mode 100644 docs/cli/defradb_start.md delete mode 100644 docs/cli/defradb_version.md rename docs/{ => website/references}/cli/defradb_client_acp.md (100%) rename docs/{ => website/references}/cli/defradb_client_acp_policy.md (100%) rename docs/{ => website/references}/cli/defradb_client_acp_policy_add.md (100%) rename docs/{ => website/references}/cli/defradb_client_backup.md (100%) rename docs/{ => website/references}/cli/defradb_client_backup_export.md (100%) rename docs/{ => website/references}/cli/defradb_client_backup_import.md (100%) delete mode 100644 docs/website/references/cli/defradb_client_blocks.md delete mode 100644 docs/website/references/cli/defradb_client_blocks_get.md rename docs/{ => website/references}/cli/defradb_client_collection.md (100%) rename docs/{ => website/references}/cli/defradb_client_collection_create.md (100%) rename docs/{ => website/references}/cli/defradb_client_collection_delete.md (100%) rename docs/{ => website/references}/cli/defradb_client_collection_describe.md (100%) rename docs/{ => website/references}/cli/defradb_client_collection_docIDs.md (100%) rename docs/{ => website/references}/cli/defradb_client_collection_get.md (100%) rename docs/{ => website/references}/cli/defradb_client_collection_patch.md (100%) rename docs/{ => 
website/references}/cli/defradb_client_collection_update.md (100%) rename docs/{ => website/references}/cli/defradb_client_index.md (100%) rename docs/{ => website/references}/cli/defradb_client_index_create.md (100%) rename docs/{ => website/references}/cli/defradb_client_index_drop.md (100%) rename docs/{ => website/references}/cli/defradb_client_index_list.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_collection.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_collection_add.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_collection_getall.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_collection_remove.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_info.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_replicator.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_replicator_delete.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_replicator_getall.md (100%) rename docs/{ => website/references}/cli/defradb_client_p2p_replicator_set.md (100%) delete mode 100644 docs/website/references/cli/defradb_client_peerid.md delete mode 100644 docs/website/references/cli/defradb_client_ping.md delete mode 100644 docs/website/references/cli/defradb_client_rpc.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_addreplicator.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_replicator.md delete mode 100644 
docs/website/references/cli/defradb_client_rpc_replicator_delete.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_replicator_getall.md delete mode 100644 docs/website/references/cli/defradb_client_rpc_replicator_set.md rename docs/{ => website/references}/cli/defradb_client_schema_describe.md (100%) rename docs/{ => website/references}/cli/defradb_client_schema_migration.md (100%) rename docs/{ => website/references}/cli/defradb_client_schema_migration_down.md (100%) rename docs/{ => website/references}/cli/defradb_client_schema_migration_reload.md (100%) rename docs/{ => website/references}/cli/defradb_client_schema_migration_set-registry.md (100%) rename docs/{ => website/references}/cli/defradb_client_schema_migration_set.md (100%) rename docs/{ => website/references}/cli/defradb_client_schema_migration_up.md (100%) rename docs/{ => website/references}/cli/defradb_client_schema_set-active.md (100%) rename docs/{ => website/references}/cli/defradb_client_tx.md (100%) rename docs/{ => website/references}/cli/defradb_client_tx_commit.md (100%) rename docs/{ => website/references}/cli/defradb_client_tx_create.md (100%) rename docs/{ => website/references}/cli/defradb_client_tx_discard.md (100%) rename docs/{ => website/references}/cli/defradb_client_view.md (100%) rename docs/{ => website/references}/cli/defradb_client_view_add.md (100%) delete mode 100644 docs/website/references/cli/defradb_init.md rename docs/{ => website/references}/cli/defradb_keyring.md (100%) rename docs/{ => website/references}/cli/defradb_keyring_export.md (100%) rename docs/{ => website/references}/cli/defradb_keyring_generate.md (100%) rename docs/{ => website/references}/cli/defradb_keyring_import.md (100%) diff --git a/Makefile b/Makefile index 658b514a4b..bc4093ddbf 100644 --- a/Makefile +++ b/Makefile @@ -359,7 +359,8 @@ docs: .PHONY: docs\:cli docs\:cli: - go run cmd/genclidocs/main.go -o docs/cli/ + rm -f docs/website/references/cli/*.md + go run 
cmd/genclidocs/main.go -o docs/website/references/cli .PHONY: docs\:manpages docs\:manpages: diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md deleted file mode 100644 index 3edc08b8d9..0000000000 --- a/docs/cli/defradb.md +++ /dev/null @@ -1,38 +0,0 @@ -## defradb - -DefraDB Edge Database - -### Synopsis - -DefraDB is the edge database to power the user-centric future. - -Start a DefraDB node, interact with a local or remote node, and much more. - - -### Options - -``` - -h, --help help for defradb - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a DefraDB node -* [defradb keyring](defradb_keyring.md) - Manage DefraDB private keys -* [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database -* [defradb start](defradb_start.md) - Start a DefraDB node -* [defradb version](defradb_version.md) - Display the version information of DefraDB and its components - diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md deleted file mode 100644 index 73c029153f..0000000000 --- a/docs/cli/defradb_client.md +++ /dev/null @@ -1,49 +0,0 @@ -## defradb client - -Interact with a DefraDB node - -### Synopsis - -Interact with a DefraDB node. -Execute queries, add schema types, obtain node info, etc. - -### Options - -``` - -h, --help help for client - -i, --identity string ACP Identity - --tx uint Transaction ID -``` - -### Options inherited from parent commands - -``` - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb](defradb.md) - DefraDB Edge Database -* [defradb client acp](defradb_client_acp.md) - Interact with the access control system of a DefraDB node -* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility -* [defradb client collection](defradb_client_collection.md) - Interact with a collection. -* [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side -* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance -* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system -* [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node -* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions -* [defradb client view](defradb_client_view.md) - Manage views within a running DefraDB instance - diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md deleted file mode 100644 index ca831313cd..0000000000 --- a/docs/cli/defradb_client_dump.md +++ /dev/null @@ -1,38 +0,0 @@ -## defradb client dump - -Dump the contents of DefraDB node-side - -``` -defradb client dump [flags] -``` - -### Options - -``` - -h, --help help for dump -``` - -### Options inherited from parent commands - -``` - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a DefraDB node - diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md deleted file mode 100644 index 2dcea07526..0000000000 --- a/docs/cli/defradb_client_query.md +++ /dev/null @@ -1,60 +0,0 @@ -## defradb client query - -Send a DefraDB GraphQL query request - -### Synopsis - -Send a DefraDB GraphQL query request to the database. - -A query request can be sent as a single argument. Example command: - defradb client query 'query { ... }' - -Do a query request from a file by using the '-f' flag. Example command: - defradb client query -f request.graphql - -Do a query request from a file and with an identity. Example command: - defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql - -Or it can be sent via stdin by using the '-' special syntax. 
Example command: - cat request.graphql | defradb client query - - -A GraphQL client such as GraphiQL (https://github.com/graphql/graphiql) can be used to interact -with the database more conveniently. - -To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network. - -``` -defradb client query [-i --identity] [request] [flags] -``` - -### Options - -``` - -f, --file string File containing the query request - -h, --help help for query -``` - -### Options inherited from parent commands - -``` - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a DefraDB node - diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md deleted file mode 100644 index c69bdaee8f..0000000000 --- a/docs/cli/defradb_client_schema.md +++ /dev/null @@ -1,43 +0,0 @@ -## defradb client schema - -Interact with the schema system of a DefraDB node - -### Synopsis - -Make changes, updates, or look for existing schema types. - -### Options - -``` - -h, --help help for schema -``` - -### Options inherited from parent commands - -``` - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a DefraDB node -* [defradb client schema add](defradb_client_schema_add.md) - Add new schema -* [defradb client schema describe](defradb_client_schema_describe.md) - View schema descriptions. -* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance -* [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type -* [defradb client schema set-active](defradb_client_schema_set-active.md) - Set the active collection version - diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md deleted file mode 100644 index 9e70bb1f17..0000000000 --- a/docs/cli/defradb_client_schema_add.md +++ /dev/null @@ -1,59 +0,0 @@ -## defradb client schema add - -Add new schema - -### Synopsis - -Add new schema. - -Schema Object with a '@policy(id:".." resource: "..")' linked will only be accepted if: - - ACP is available (i.e. ACP is not disabled). - - The specified resource adheres to the Document Access Control DPI Rules. - - Learn more about [ACP & DPI Rules](/acp/README.md) - -Example: add from an argument string: - defradb client schema add 'type Foo { ... }' - -Example: add from file: - defradb client schema add -f schema.graphql - -Example: add from stdin: - cat schema.graphql | defradb client schema add - - -Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network. 
- -``` -defradb client schema add [schema] [flags] -``` - -### Options - -``` - -f, --file string File to load a schema from - -h, --help help for add -``` - -### Options inherited from parent commands - -``` - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node - diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md deleted file mode 100644 index 6c884d0a0f..0000000000 --- a/docs/cli/defradb_client_schema_patch.md +++ /dev/null @@ -1,58 +0,0 @@ -## defradb client schema patch - -Patch an existing schema type - -### Synopsis - -Patch an existing schema. - -Uses JSON Patch to modify schema types. - -Example: patch from an argument string: - defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' '{"lenses": [...' 
- -Example: patch from file: - defradb client schema patch -p patch.json - -Example: patch from stdin: - cat patch.json | defradb client schema patch - - -To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network. - -``` -defradb client schema patch [schema] [migration] [flags] -``` - -### Options - -``` - -h, --help help for patch - -t, --lens-file string File to load a lens config from - -p, --patch-file string File to load a patch from - --set-active Set the active schema version for all collections using the root schem -``` - -### Options inherited from parent commands - -``` - -i, --identity string ACP Identity - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --tx uint Transaction ID - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node - diff --git a/docs/cli/defradb_server-dump.md b/docs/cli/defradb_server-dump.md deleted file mode 100644 index 403d72c972..0000000000 --- a/docs/cli/defradb_server-dump.md +++ /dev/null @@ -1,36 +0,0 @@ -## defradb server-dump - -Dumps the state of the entire database - -``` -defradb server-dump [flags] -``` - -### Options - -``` - -h, --help help for server-dump -``` - -### Options inherited from parent commands - -``` - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb](defradb.md) - DefraDB Edge Database - diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md deleted file mode 100644 index 0f2bed427f..0000000000 --- a/docs/cli/defradb_start.md +++ /dev/null @@ -1,49 +0,0 @@ -## defradb start - -Start a DefraDB node - -### Synopsis - -Start a DefraDB node. - -``` -defradb start [flags] -``` - -### Options - -``` - --allowed-origins stringArray List of origins to allow for CORS requests - -h, --help help for start - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) - --peers stringArray List of peers to connect to - --privkeypath string Path to the private key for tls - --pubkeypath string Path to the public key for tls - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) -``` - -### Options inherited from parent commands - -``` - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. 
Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... - --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb](defradb.md) - DefraDB Edge Database - diff --git a/docs/cli/defradb_version.md b/docs/cli/defradb_version.md deleted file mode 100644 index c82cd43df8..0000000000 --- a/docs/cli/defradb_version.md +++ /dev/null @@ -1,38 +0,0 @@ -## defradb version - -Display the version information of DefraDB and its components - -``` -defradb version [flags] -``` - -### Options - -``` - -f, --format string Version output format. Options are text, json - --full Display the full version information - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --keyring-backend string Keyring backend to use. Options are file or system (default "file") - --keyring-namespace string Service name to use when using the system backend (default "defradb") - --keyring-path string Path to store encrypted keys when using the file backend (default "keys") - --log-format string Log format to use. Options are text or json (default "text") - --log-level string Log level to use. Options are debug, info, error, fatal (default "info") - --log-output string Log output path. Options are stderr or stdout. (default "stderr") - --log-overrides string Logger config overrides. Format ,=,...;,... 
- --log-source Include source location in logs - --log-stacktrace Include stacktrace in error and fatal logs - --no-keyring Disable the keyring and generate ephemeral keys - --no-log-color Disable colored log output - --rootdir string Directory for persistent data (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") -``` - -### SEE ALSO - -* [defradb](defradb.md) - DefraDB Edge Database - diff --git a/docs/website/references/cli/defradb.md b/docs/website/references/cli/defradb.md index 3a38cb52a0..3edc08b8d9 100644 --- a/docs/website/references/cli/defradb.md +++ b/docs/website/references/cli/defradb.md @@ -1,35 +1,37 @@ -# defradb +## defradb DefraDB Edge Database -## Synopsis +### Synopsis DefraDB is the edge database to power the user-centric future. -Start a database node, issue a request to a local or remote node, and much more. +Start a DefraDB node, interact with a local or remote node, and much more. -DefraDB is released under the BSL license, (c) 2022 Democratized Data Foundation. -See https://docs.source.network/BSL.txt for more information. - -## Options +### Options ``` - -h, --help help for defradb - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + -h, --help help for defradb + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb init](defradb_init.md) - Initialize DefraDB's root directory and configuration file +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb keyring](defradb_keyring.md) - Manage DefraDB private keys * [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database * [defradb start](defradb_start.md) - Start a DefraDB node * [defradb version](defradb_version.md) - Display the version information of DefraDB and its components diff --git a/docs/website/references/cli/defradb_client.md b/docs/website/references/cli/defradb_client.md index 81656acca2..73c029153f 100644 --- a/docs/website/references/cli/defradb_client.md +++ b/docs/website/references/cli/defradb_client.md @@ -1,39 +1,49 @@ -# client +## defradb client -Interact with a running DefraDB node as a client +Interact with a DefraDB node -## Synopsis +### Synopsis -Interact with a running DefraDB 
node as a client. -Execute queries, add schema types, and run debug routines. +Interact with a DefraDB node. +Execute queries, add schema types, obtain node info, etc. -## Options +### Options ``` - -h, --help help for client + -h, --help help for client + -i, --identity string ACP Identity + --tx uint Transaction ID ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO * [defradb](defradb.md) - DefraDB Edge Database -* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore -* [defradb client dump](defradb_client_dump.md) - Dump the contents of a database node-side -* [defradb client peerid](defradb_client_peerid.md) - Get the peer ID of the DefraDB node -* [defradb client ping](defradb_client_ping.md) - Ping to test connection to a node +* [defradb client acp](defradb_client_acp.md) - Interact with the access control system of a DefraDB node +* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. 
+* [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side +* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system * [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions +* [defradb client view](defradb_client_view.md) - Manage views within a running DefraDB instance diff --git a/docs/cli/defradb_client_acp.md b/docs/website/references/cli/defradb_client_acp.md similarity index 100% rename from docs/cli/defradb_client_acp.md rename to docs/website/references/cli/defradb_client_acp.md diff --git a/docs/cli/defradb_client_acp_policy.md b/docs/website/references/cli/defradb_client_acp_policy.md similarity index 100% rename from docs/cli/defradb_client_acp_policy.md rename to docs/website/references/cli/defradb_client_acp_policy.md diff --git a/docs/cli/defradb_client_acp_policy_add.md b/docs/website/references/cli/defradb_client_acp_policy_add.md similarity index 100% rename from docs/cli/defradb_client_acp_policy_add.md rename to docs/website/references/cli/defradb_client_acp_policy_add.md diff --git a/docs/cli/defradb_client_backup.md b/docs/website/references/cli/defradb_client_backup.md similarity index 100% rename from docs/cli/defradb_client_backup.md rename to docs/website/references/cli/defradb_client_backup.md diff --git a/docs/cli/defradb_client_backup_export.md b/docs/website/references/cli/defradb_client_backup_export.md similarity index 100% rename from 
docs/cli/defradb_client_backup_export.md rename to docs/website/references/cli/defradb_client_backup_export.md diff --git a/docs/cli/defradb_client_backup_import.md b/docs/website/references/cli/defradb_client_backup_import.md similarity index 100% rename from docs/cli/defradb_client_backup_import.md rename to docs/website/references/cli/defradb_client_backup_import.md diff --git a/docs/website/references/cli/defradb_client_blocks.md b/docs/website/references/cli/defradb_client_blocks.md deleted file mode 100644 index 9f1a50f296..0000000000 --- a/docs/website/references/cli/defradb_client_blocks.md +++ /dev/null @@ -1,28 +0,0 @@ -# client blocks - -Interact with the database's blockstore - -## Options - -``` - -h, --help help for blocks -``` - -## Options inherited from parent commands - -``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore. - diff --git a/docs/website/references/cli/defradb_client_blocks_get.md b/docs/website/references/cli/defradb_client_blocks_get.md deleted file mode 100644 index 2ddfcb8453..0000000000 --- a/docs/website/references/cli/defradb_client_blocks_get.md +++ /dev/null @@ -1,31 +0,0 @@ -# client blocks get - -Get a block by its CID from the blockstore. 
- -``` -defradb client blocks get [CID] [flags] -``` - -## Options - -``` - -h, --help help for get -``` - -## Options inherited from parent commands - -``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore - diff --git a/docs/cli/defradb_client_collection.md b/docs/website/references/cli/defradb_client_collection.md similarity index 100% rename from docs/cli/defradb_client_collection.md rename to docs/website/references/cli/defradb_client_collection.md diff --git a/docs/cli/defradb_client_collection_create.md b/docs/website/references/cli/defradb_client_collection_create.md similarity index 100% rename from docs/cli/defradb_client_collection_create.md rename to docs/website/references/cli/defradb_client_collection_create.md diff --git a/docs/cli/defradb_client_collection_delete.md b/docs/website/references/cli/defradb_client_collection_delete.md similarity index 100% rename from docs/cli/defradb_client_collection_delete.md rename to docs/website/references/cli/defradb_client_collection_delete.md diff --git a/docs/cli/defradb_client_collection_describe.md b/docs/website/references/cli/defradb_client_collection_describe.md similarity index 100% rename from docs/cli/defradb_client_collection_describe.md rename to docs/website/references/cli/defradb_client_collection_describe.md diff --git 
a/docs/cli/defradb_client_collection_docIDs.md b/docs/website/references/cli/defradb_client_collection_docIDs.md similarity index 100% rename from docs/cli/defradb_client_collection_docIDs.md rename to docs/website/references/cli/defradb_client_collection_docIDs.md diff --git a/docs/cli/defradb_client_collection_get.md b/docs/website/references/cli/defradb_client_collection_get.md similarity index 100% rename from docs/cli/defradb_client_collection_get.md rename to docs/website/references/cli/defradb_client_collection_get.md diff --git a/docs/cli/defradb_client_collection_patch.md b/docs/website/references/cli/defradb_client_collection_patch.md similarity index 100% rename from docs/cli/defradb_client_collection_patch.md rename to docs/website/references/cli/defradb_client_collection_patch.md diff --git a/docs/cli/defradb_client_collection_update.md b/docs/website/references/cli/defradb_client_collection_update.md similarity index 100% rename from docs/cli/defradb_client_collection_update.md rename to docs/website/references/cli/defradb_client_collection_update.md diff --git a/docs/website/references/cli/defradb_client_dump.md b/docs/website/references/cli/defradb_client_dump.md index fdc2a38a3b..ca831313cd 100644 --- a/docs/website/references/cli/defradb_client_dump.md +++ b/docs/website/references/cli/defradb_client_dump.md @@ -1,31 +1,38 @@ -# client dump +## defradb client dump -Dump the contents of a database node-side +Dump the contents of DefraDB node-side ``` defradb client dump [flags] ``` -## Options +### Options ``` -h, --help help for dump ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_index.md b/docs/website/references/cli/defradb_client_index.md similarity index 100% rename from docs/cli/defradb_client_index.md rename to docs/website/references/cli/defradb_client_index.md diff --git a/docs/cli/defradb_client_index_create.md b/docs/website/references/cli/defradb_client_index_create.md similarity index 100% rename from docs/cli/defradb_client_index_create.md rename to docs/website/references/cli/defradb_client_index_create.md diff --git a/docs/cli/defradb_client_index_drop.md b/docs/website/references/cli/defradb_client_index_drop.md similarity index 100% rename from docs/cli/defradb_client_index_drop.md rename to docs/website/references/cli/defradb_client_index_drop.md diff --git a/docs/cli/defradb_client_index_list.md b/docs/website/references/cli/defradb_client_index_list.md similarity index 100% rename from docs/cli/defradb_client_index_list.md rename to docs/website/references/cli/defradb_client_index_list.md diff --git a/docs/cli/defradb_client_p2p.md b/docs/website/references/cli/defradb_client_p2p.md similarity index 100% rename from docs/cli/defradb_client_p2p.md rename to docs/website/references/cli/defradb_client_p2p.md diff --git a/docs/cli/defradb_client_p2p_collection.md b/docs/website/references/cli/defradb_client_p2p_collection.md similarity index 100% rename from docs/cli/defradb_client_p2p_collection.md 
rename to docs/website/references/cli/defradb_client_p2p_collection.md diff --git a/docs/cli/defradb_client_p2p_collection_add.md b/docs/website/references/cli/defradb_client_p2p_collection_add.md similarity index 100% rename from docs/cli/defradb_client_p2p_collection_add.md rename to docs/website/references/cli/defradb_client_p2p_collection_add.md diff --git a/docs/cli/defradb_client_p2p_collection_getall.md b/docs/website/references/cli/defradb_client_p2p_collection_getall.md similarity index 100% rename from docs/cli/defradb_client_p2p_collection_getall.md rename to docs/website/references/cli/defradb_client_p2p_collection_getall.md diff --git a/docs/cli/defradb_client_p2p_collection_remove.md b/docs/website/references/cli/defradb_client_p2p_collection_remove.md similarity index 100% rename from docs/cli/defradb_client_p2p_collection_remove.md rename to docs/website/references/cli/defradb_client_p2p_collection_remove.md diff --git a/docs/cli/defradb_client_p2p_info.md b/docs/website/references/cli/defradb_client_p2p_info.md similarity index 100% rename from docs/cli/defradb_client_p2p_info.md rename to docs/website/references/cli/defradb_client_p2p_info.md diff --git a/docs/cli/defradb_client_p2p_replicator.md b/docs/website/references/cli/defradb_client_p2p_replicator.md similarity index 100% rename from docs/cli/defradb_client_p2p_replicator.md rename to docs/website/references/cli/defradb_client_p2p_replicator.md diff --git a/docs/cli/defradb_client_p2p_replicator_delete.md b/docs/website/references/cli/defradb_client_p2p_replicator_delete.md similarity index 100% rename from docs/cli/defradb_client_p2p_replicator_delete.md rename to docs/website/references/cli/defradb_client_p2p_replicator_delete.md diff --git a/docs/cli/defradb_client_p2p_replicator_getall.md b/docs/website/references/cli/defradb_client_p2p_replicator_getall.md similarity index 100% rename from docs/cli/defradb_client_p2p_replicator_getall.md rename to 
docs/website/references/cli/defradb_client_p2p_replicator_getall.md diff --git a/docs/cli/defradb_client_p2p_replicator_set.md b/docs/website/references/cli/defradb_client_p2p_replicator_set.md similarity index 100% rename from docs/cli/defradb_client_p2p_replicator_set.md rename to docs/website/references/cli/defradb_client_p2p_replicator_set.md diff --git a/docs/website/references/cli/defradb_client_peerid.md b/docs/website/references/cli/defradb_client_peerid.md deleted file mode 100644 index cf3f175646..0000000000 --- a/docs/website/references/cli/defradb_client_peerid.md +++ /dev/null @@ -1,31 +0,0 @@ -# client peerid - -Get the peer ID of the DefraDB node - -``` -defradb client peerid [flags] -``` - -## Options - -``` - -h, --help help for peerid -``` - -## Options inherited from parent commands - -``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client - diff --git a/docs/website/references/cli/defradb_client_ping.md b/docs/website/references/cli/defradb_client_ping.md deleted file mode 100644 index 6115c5f493..0000000000 --- a/docs/website/references/cli/defradb_client_ping.md +++ /dev/null @@ -1,31 +0,0 @@ -# client ping - -Ping to test connection to a node - -``` -defradb client ping [flags] -``` - -## Options - -``` - -h, --help help for ping -``` - -## Options inherited from parent commands - -``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client - diff --git a/docs/website/references/cli/defradb_client_query.md b/docs/website/references/cli/defradb_client_query.md index b602633b78..2dcea07526 100644 --- a/docs/website/references/cli/defradb_client_query.md +++ b/docs/website/references/cli/defradb_client_query.md @@ -1,16 +1,22 @@ -# client query +## defradb client query Send a DefraDB GraphQL query request -## Synopsis +### Synopsis Send a DefraDB GraphQL query request to the database. A query request can be sent as a single argument. Example command: -defradb client query 'query { ... }' + defradb client query 'query { ... }' + +Do a query request from a file by using the '-f' flag. Example command: + defradb client query -f request.graphql + +Do a query request from a file and with an identity. Example command: + defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql Or it can be sent via stdin by using the '-' special syntax. Example command: -cat request.graphql | defradb client query - + cat request.graphql | defradb client query - A GraphQL client such as GraphiQL (https://github.com/graphql/graphiql) can be used to interact with the database more conveniently. @@ -18,29 +24,37 @@ with the database more conveniently. To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network. 
``` -defradb client query [query request] [flags] +defradb client query [-i --identity] [request] [flags] ``` -## Options +### Options ``` - -h, --help help for query + -f, --file string File containing the query request + -h, --help help for query ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/website/references/cli/defradb_client_rpc.md b/docs/website/references/cli/defradb_client_rpc.md deleted file mode 100644 index 1044e78c2e..0000000000 --- a/docs/website/references/cli/defradb_client_rpc.md +++ /dev/null @@ -1,34 +0,0 @@ -# client rpc - -Interact with a DefraDB gRPC server - -## Synopsis - -Interact with a DefraDB gRPC server. - -## Options - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - -h, --help help for rpc -``` - -## Options inherited from parent commands - -``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system - diff --git a/docs/website/references/cli/defradb_client_rpc_addreplicator.md b/docs/website/references/cli/defradb_client_rpc_addreplicator.md deleted file mode 100644 index a7c5c9fe7a..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_addreplicator.md +++ /dev/null @@ -1,37 +0,0 @@ -# client rpc addreplicator - -Add a new replicator - -## Synopsis - -Use this command if you wish to add a new target replicator -for the p2p data sync system. - -``` -defradb client rpc addreplicator [flags] -``` - -## Options - -``` - -h, --help help for addreplicator -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server - diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection.md deleted file mode 100644 index 37edd5e76d..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_p2pcollection.md +++ /dev/null @@ -1,35 +0,0 @@ -# client rpc p2pcollection - -Interact with the P2P collection system - -## Synopsis - -Add, delete, or get the list of P2P collections - -## Options - -``` - -h, --help help for p2pcollection -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server -* [defradb client rpc p2pcollection add](defradb_client_rpc_p2pcollection_add.md) - Add P2P collections -* [defradb client rpc p2pcollection getall](defradb_client_rpc_p2pcollection_getall.md) - Get all P2P collections -* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Add P2P collections - diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md deleted file mode 100644 index 902ff41cb7..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_p2pcollection_add.md +++ /dev/null @@ -1,36 +0,0 @@ -# client rpc p2pcollection add - -Add P2P collections - -## Synopsis - -Use this command if you wish to add new P2P collections to the pubsub topics - -``` -defradb client rpc p2pcollection add [collectionID] [flags] -``` - -## Options - -``` - -h, --help help for add -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system - diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md deleted file mode 100644 index 92d53377c0..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_p2pcollection_getall.md +++ /dev/null @@ -1,36 +0,0 @@ -# client rpc p2pcollection getall - -Get all P2P collections - -## Synopsis - -Use this command if you wish to get all P2P collections in the pubsub topics - -``` -defradb client rpc p2pcollection getall [flags] -``` - -## Options - -``` - -h, --help help for getall -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system - diff --git a/docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md b/docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md deleted file mode 100644 index 9f8214dc46..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_p2pcollection_remove.md +++ /dev/null @@ -1,36 +0,0 @@ -# client rpc p2pcollection remove - -Add P2P collections - -## Synopsis - -Use this command if you wish to remove P2P collections from the pubsub topics - -``` -defradb client rpc p2pcollection remove [collectionID] [flags] -``` - -## Options - -``` - -h, --help help for remove -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system - diff --git a/docs/website/references/cli/defradb_client_rpc_replicator.md b/docs/website/references/cli/defradb_client_rpc_replicator.md deleted file mode 100644 index cdb87fad32..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_replicator.md +++ /dev/null @@ -1,37 +0,0 @@ -# client rpc replicator - -Interact with the replicator system - -## Synopsis - -Add, delete, or get the list of persisted replicators - -## Options - -``` - -c, --collection stringArray Define the collection for the replicator - -f, --full Set the replicator to act on all collections - -h, --help help for replicator -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server -* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator -* [defradb client rpc replicator getall](defradb_client_rpc_replicator_getall.md) - Get all replicators -* [defradb client rpc replicator set](defradb_client_rpc_replicator_set.md) - Set a P2P replicator - diff --git a/docs/website/references/cli/defradb_client_rpc_replicator_delete.md b/docs/website/references/cli/defradb_client_rpc_replicator_delete.md deleted file mode 100644 index 392481b3e9..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_replicator_delete.md +++ /dev/null @@ -1,37 +0,0 @@ -# client rpc replicator delete - -Delete a replicator - -## Synopsis - -Use this command if you wish to remove the target replicator -for the p2p data sync system. - -``` -defradb client rpc replicator delete [-f, --full | -c, --collection] [flags] -``` - -## Options - -``` - -h, --help help for delete -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system - diff --git a/docs/website/references/cli/defradb_client_rpc_replicator_getall.md b/docs/website/references/cli/defradb_client_rpc_replicator_getall.md deleted file mode 100644 index 79d891a5c9..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_replicator_getall.md +++ /dev/null @@ -1,37 +0,0 @@ -# client rpc replicator getall - -Get all replicators - -## Synopsis - -Use this command if you wish to get all the replicators -for the p2p data sync system. - -``` -defradb client rpc replicator getall [flags] -``` - -## Options - -``` - -h, --help help for getall -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system - diff --git a/docs/website/references/cli/defradb_client_rpc_replicator_set.md b/docs/website/references/cli/defradb_client_rpc_replicator_set.md deleted file mode 100644 index 5b94f1a1ab..0000000000 --- a/docs/website/references/cli/defradb_client_rpc_replicator_set.md +++ /dev/null @@ -1,39 +0,0 @@ -# client rpc replicator set - -Set a P2P replicator - -## Synopsis - -Use this command if you wish to add a new target replicator -for the p2p data sync system or add schemas to an existing one - -``` -defradb client rpc replicator set [-f, --full | -c, --collection] [flags] -``` - -## Options - -``` - -c, --collection stringArray Define the collection for the replicator - -f, --full Set the replicator to act on all collections - -h, --help help for set -``` - -## Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system - diff --git a/docs/website/references/cli/defradb_client_schema.md b/docs/website/references/cli/defradb_client_schema.md index 140c7fe635..c69bdaee8f 100644 --- a/docs/website/references/cli/defradb_client_schema.md +++ b/docs/website/references/cli/defradb_client_schema.md @@ -1,33 +1,43 @@ -# client schema +## defradb client schema -Interact with the schema system of a running DefraDB instance +Interact with the schema system of a DefraDB node -## Synopsis +### Synopsis -Make changes, updates, or look for existing schema types to a DefraDB node. +Make changes, updates, or look for existing schema types. -## Options +### Options ``` -h, --help help for schema ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client schema add](defradb_client_schema_add.md) - Add a new schema type to DefraDB +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client schema add](defradb_client_schema_add.md) - Add new schema +* [defradb client schema describe](defradb_client_schema_describe.md) - View schema descriptions. 
+* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance * [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type +* [defradb client schema set-active](defradb_client_schema_set-active.md) - Set the active collection version diff --git a/docs/website/references/cli/defradb_client_schema_add.md b/docs/website/references/cli/defradb_client_schema_add.md index 0909eb5789..9e70bb1f17 100644 --- a/docs/website/references/cli/defradb_client_schema_add.md +++ b/docs/website/references/cli/defradb_client_schema_add.md @@ -1,10 +1,15 @@ -# client schema add +## defradb client schema add -Add a new schema type to DefraDB +Add new schema -## Synopsis +### Synopsis -Add a new schema type to DefraDB. +Add new schema. + +Schema Object with a '@policy(id:".." resource: "..")' linked will only be accepted if: + - ACP is available (i.e. ACP is not disabled). + - The specified resource adheres to the Document Access Control DPI Rules. + - Learn more about [ACP & DPI Rules](/acp/README.md) Example: add from an argument string: defradb client schema add 'type Foo { ... }' @@ -15,33 +20,40 @@ Example: add from file: Example: add from stdin: cat schema.graphql | defradb client schema add - -To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network. +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network. ``` defradb client schema add [schema] [flags] ``` -## Options +### Options ``` -f, --file string File to load a schema from -h, --help help for add ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_client_schema_describe.md b/docs/website/references/cli/defradb_client_schema_describe.md similarity index 100% rename from docs/cli/defradb_client_schema_describe.md rename to docs/website/references/cli/defradb_client_schema_describe.md diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/website/references/cli/defradb_client_schema_migration.md similarity index 100% rename from docs/cli/defradb_client_schema_migration.md rename to docs/website/references/cli/defradb_client_schema_migration.md diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/website/references/cli/defradb_client_schema_migration_down.md similarity index 100% rename from docs/cli/defradb_client_schema_migration_down.md rename to docs/website/references/cli/defradb_client_schema_migration_down.md diff --git a/docs/cli/defradb_client_schema_migration_reload.md b/docs/website/references/cli/defradb_client_schema_migration_reload.md similarity index 100% rename from docs/cli/defradb_client_schema_migration_reload.md rename to docs/website/references/cli/defradb_client_schema_migration_reload.md diff --git a/docs/cli/defradb_client_schema_migration_set-registry.md b/docs/website/references/cli/defradb_client_schema_migration_set-registry.md similarity index 100% rename from 
docs/cli/defradb_client_schema_migration_set-registry.md rename to docs/website/references/cli/defradb_client_schema_migration_set-registry.md diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/website/references/cli/defradb_client_schema_migration_set.md similarity index 100% rename from docs/cli/defradb_client_schema_migration_set.md rename to docs/website/references/cli/defradb_client_schema_migration_set.md diff --git a/docs/cli/defradb_client_schema_migration_up.md b/docs/website/references/cli/defradb_client_schema_migration_up.md similarity index 100% rename from docs/cli/defradb_client_schema_migration_up.md rename to docs/website/references/cli/defradb_client_schema_migration_up.md diff --git a/docs/website/references/cli/defradb_client_schema_patch.md b/docs/website/references/cli/defradb_client_schema_patch.md index 307e7b83fa..6c884d0a0f 100644 --- a/docs/website/references/cli/defradb_client_schema_patch.md +++ b/docs/website/references/cli/defradb_client_schema_patch.md @@ -1,18 +1,18 @@ -# client schema patch +## defradb client schema patch Patch an existing schema type -## Synopsis +### Synopsis Patch an existing schema. -Uses JSON PATCH formatting as a DDL. +Uses JSON Patch to modify schema types. Example: patch from an argument string: - defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' + defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' '{"lenses": [...' Example: patch from file: - defradb client schema patch -f patch.json + defradb client schema patch -p patch.json Example: patch from stdin: cat patch.json | defradb client schema patch - @@ -20,30 +20,39 @@ Example: patch from stdin: To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network. 
``` -defradb client schema patch [schema] [flags] +defradb client schema patch [schema] [migration] [flags] ``` -## Options +### Options ``` - -f, --file string File to load a patch from - -h, --help help for patch + -h, --help help for patch + -t, --lens-file string File to load a lens config from + -p, --patch-file string File to load a patch from + --set-active Set the active schema version for all collections using the root schema ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + -i, --identity string ACP Identity + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_client_schema_set-active.md b/docs/website/references/cli/defradb_client_schema_set-active.md similarity index 100% rename from docs/cli/defradb_client_schema_set-active.md rename to docs/website/references/cli/defradb_client_schema_set-active.md diff --git a/docs/cli/defradb_client_tx.md b/docs/website/references/cli/defradb_client_tx.md similarity index 100% rename from docs/cli/defradb_client_tx.md rename to docs/website/references/cli/defradb_client_tx.md diff --git a/docs/cli/defradb_client_tx_commit.md b/docs/website/references/cli/defradb_client_tx_commit.md similarity index 100% rename from docs/cli/defradb_client_tx_commit.md rename to docs/website/references/cli/defradb_client_tx_commit.md diff --git a/docs/cli/defradb_client_tx_create.md b/docs/website/references/cli/defradb_client_tx_create.md similarity index 100% rename from docs/cli/defradb_client_tx_create.md rename to docs/website/references/cli/defradb_client_tx_create.md diff --git a/docs/cli/defradb_client_tx_discard.md b/docs/website/references/cli/defradb_client_tx_discard.md similarity index 100% rename from docs/cli/defradb_client_tx_discard.md rename to docs/website/references/cli/defradb_client_tx_discard.md diff --git a/docs/cli/defradb_client_view.md b/docs/website/references/cli/defradb_client_view.md 
similarity index 100% rename from docs/cli/defradb_client_view.md rename to docs/website/references/cli/defradb_client_view.md diff --git a/docs/cli/defradb_client_view_add.md b/docs/website/references/cli/defradb_client_view_add.md similarity index 100% rename from docs/cli/defradb_client_view_add.md rename to docs/website/references/cli/defradb_client_view_add.md diff --git a/docs/website/references/cli/defradb_init.md b/docs/website/references/cli/defradb_init.md deleted file mode 100644 index 5b7f2071ce..0000000000 --- a/docs/website/references/cli/defradb_init.md +++ /dev/null @@ -1,36 +0,0 @@ -# init - -Initialize DefraDB's root directory and configuration file - -## Synopsis - -Initialize a directory for configuration and data at the given path. - -``` -defradb init [flags] -``` - -## Options - -``` - -h, --help help for init - --reinitialize Reinitialize the configuration file - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") -``` - -## Options inherited from parent commands - -``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -## SEE ALSO - -* [defradb](defradb.md) - DefraDB Edge Database - diff --git a/docs/cli/defradb_keyring.md b/docs/website/references/cli/defradb_keyring.md similarity index 100% rename from docs/cli/defradb_keyring.md rename to docs/website/references/cli/defradb_keyring.md diff --git a/docs/cli/defradb_keyring_export.md b/docs/website/references/cli/defradb_keyring_export.md similarity index 100% rename from docs/cli/defradb_keyring_export.md rename to docs/website/references/cli/defradb_keyring_export.md diff --git a/docs/cli/defradb_keyring_generate.md b/docs/website/references/cli/defradb_keyring_generate.md similarity index 100% rename from docs/cli/defradb_keyring_generate.md rename to docs/website/references/cli/defradb_keyring_generate.md diff --git a/docs/cli/defradb_keyring_import.md b/docs/website/references/cli/defradb_keyring_import.md similarity index 100% rename from docs/cli/defradb_keyring_import.md rename to docs/website/references/cli/defradb_keyring_import.md diff --git a/docs/website/references/cli/defradb_server-dump.md b/docs/website/references/cli/defradb_server-dump.md index 91641d1125..403d72c972 100644 --- a/docs/website/references/cli/defradb_server-dump.md +++ b/docs/website/references/cli/defradb_server-dump.md @@ -1,4 +1,4 @@ -# server-dump +## defradb server-dump Dumps the state of the entire database @@ -6,27 +6,31 @@ Dumps the state of the entire database defradb server-dump [flags] ``` -## Options +### Options ``` - -h, --help help for server-dump - --store string Datastore to use. 
Options are badger, memory (default "badger") + -h, --help help for server-dump ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO * [defradb](defradb.md) - DefraDB Edge Database diff --git a/docs/website/references/cli/defradb_start.md b/docs/website/references/cli/defradb_start.md index d393f7f3a5..0f2bed427f 100644 --- a/docs/website/references/cli/defradb_start.md +++ b/docs/website/references/cli/defradb_start.md @@ -1,46 +1,49 @@ -# start +## defradb start Start a DefraDB node -## Synopsis +### Synopsis -Start a new instance of DefraDB node. +Start a DefraDB node. ``` defradb start [flags] ``` -## Options +### Options ``` - --email string Email address used by the CA for notifications (default "example@example.com") - -h, --help help for start - --max-txn-retries int Specify the maximum number of retries per transaction (default 5) - --no-p2p Disable the peer-to-peer network synchronization system - --p2paddr string Listener address for the p2p network (formatted as a libp2p MultiAddr) (default "/ip4/0.0.0.0/tcp/9171") - --peers string List of peers to connect to - --privkeypath string Path to the private key for tls (default "certs/server.crt") - --pubkeypath string Path to the public key for tls (default "certs/server.key") - --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tcpaddr string Listener address for the tcp gRPC server (formatted as a libp2p MultiAddr) (default "/ip4/0.0.0.0/tcp/9161") - --tls Enable serving the API over https - --valuelogfilesize ByteSize Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize (default 1GiB) + --allowed-origins stringArray List of origins to allow for CORS requests + -h, --help help for start + --max-txn-retries int Specify the maximum number of retries per transaction (default 5) + --no-p2p Disable the peer-to-peer network synchronization system + --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171]) + --peers stringArray List of peers to connect to + --privkeypath string Path to the private key for tls + --pubkeypath string Path to the public key for tls + --store string Specify the datastore to use (supported: badger, memory) (default "badger") + --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824) ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. 
Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO * [defradb](defradb.md) - DefraDB Edge Database diff --git a/docs/website/references/cli/defradb_version.md b/docs/website/references/cli/defradb_version.md index de817ade10..c82cd43df8 100644 --- a/docs/website/references/cli/defradb_version.md +++ b/docs/website/references/cli/defradb_version.md @@ -1,4 +1,4 @@ -# version +## defradb version Display the version information of DefraDB and its components @@ -6,7 +6,7 @@ Display the version information of DefraDB and its components defradb version [flags] ``` -## Options +### Options ``` -f, --format string Version output format. Options are text, json @@ -14,20 +14,25 @@ defradb version [flags] -h, --help help for version ``` -## Options inherited from parent commands +### Options inherited from parent commands ``` - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") ``` -## SEE ALSO +### SEE ALSO * [defradb](defradb.md) - DefraDB Edge Database From 150eb3f3bf60286d42d4b4ac019babbf7be5e1c3 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Fri, 24 May 2024 09:28:54 -0700 Subject: [PATCH 20/78] fix: Make node options composable (#2648) ## Relevant issue(s) Resolves #2642 ## Description This PR fixes an issue where node subsystem options were not composable. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. 
- [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test` Specify the platform(s) on which this was tested: - MacOS --- cli/start.go | 59 ++++++++++---------------- node/node.go | 91 ++++++++++++++++++++--------------------- node/node_test.go | 59 -------------------------- tests/integration/db.go | 29 +++++-------- 4 files changed, 75 insertions(+), 163 deletions(-) diff --git a/cli/start.go b/cli/start.go index 118cf726fb..9b863a1f07 100644 --- a/cli/start.go +++ b/cli/start.go @@ -99,48 +99,41 @@ func MakeStartCommand() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { cfg := mustGetContextConfig(cmd) - dbOpts := []db.Option{ - db.WithUpdateEvents(), - db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")), + var peers []peer.AddrInfo + if val := cfg.GetStringSlice("net.peers"); len(val) > 0 { + addrs, err := netutils.ParsePeers(val) + if err != nil { + return errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %s", val), err) + } + peers = addrs } - netOpts := []net.NodeOpt{ + opts := []node.Option{ + node.WithPath(cfg.GetString("datastore.badger.path")), + node.WithInMemory(cfg.GetString("datastore.store") == configStoreMemory), + node.WithDisableP2P(cfg.GetBool("net.p2pDisabled")), + node.WithACPType(node.LocalACPType), + node.WithPeers(peers...), + // db options + db.WithUpdateEvents(), + db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")), + // net node options net.WithListenAddresses(cfg.GetStringSlice("net.p2pAddresses")...), net.WithEnablePubSub(cfg.GetBool("net.pubSubEnabled")), net.WithEnableRelay(cfg.GetBool("net.relayEnabled")), - } - - serverOpts := 
[]http.ServerOpt{ + // http server options http.WithAddress(cfg.GetString("api.address")), http.WithAllowedOrigins(cfg.GetStringSlice("api.allowed-origins")...), http.WithTLSCertPath(cfg.GetString("api.pubKeyPath")), http.WithTLSKeyPath(cfg.GetString("api.privKeyPath")), } - storeOpts := []node.StoreOpt{ - node.WithPath(cfg.GetString("datastore.badger.path")), - node.WithInMemory(cfg.GetString("datastore.store") == configStoreMemory), - } - - acpOpts := []node.ACPOpt{ - node.WithACPType(node.LocalACPType), - } - - var peers []peer.AddrInfo - if val := cfg.GetStringSlice("net.peers"); len(val) > 0 { - addrs, err := netutils.ParsePeers(val) - if err != nil { - return errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %s", val), err) - } - peers = addrs - } - if cfg.GetString("datastore.store") != configStoreMemory { rootDir := mustGetContextRootDir(cmd) // TODO-ACP: Infuture when we add support for the --no-acp flag when admin signatures are in, // we can allow starting of db without acp. Currently that can only be done programmatically. 
// https://github.com/sourcenetwork/defradb/issues/2271 - acpOpts = append(acpOpts, node.WithACPPath(rootDir)) + opts = append(opts, node.WithACPPath(rootDir)) } if !cfg.GetBool("keyring.disabled") { @@ -153,23 +146,13 @@ func MakeStartCommand() *cobra.Command { if err != nil { return NewErrKeyringHelp(err) } - netOpts = append(netOpts, net.WithPrivateKey(peerKey)) + opts = append(opts, net.WithPrivateKey(peerKey)) // load the optional encryption key encryptionKey, err := kr.Get(encryptionKeyName) if err != nil && !errors.Is(err, keyring.ErrNotFound) { return err } - storeOpts = append(storeOpts, node.WithEncryptionKey(encryptionKey)) - } - - opts := []node.NodeOpt{ - node.WithPeers(peers...), - node.WithStoreOpts(storeOpts...), - node.WithDatabaseOpts(dbOpts...), - node.WithNetOpts(netOpts...), - node.WithServerOpts(serverOpts...), - node.WithDisableP2P(cfg.GetBool("net.p2pDisabled")), - node.WithACPOpts(acpOpts...), + opts = append(opts, node.WithEncryptionKey(encryptionKey)) } n, err := node.NewNode(cmd.Context(), opts...) diff --git a/node/node.go b/node/node.go index 1293468855..bb3163834c 100644 --- a/node/node.go +++ b/node/node.go @@ -27,13 +27,19 @@ import ( var log = corelog.NewLogger("node") +// Option is a generic option that applies to any subsystem. +// +// Invalid option types will be silently ignored. Valid option types are: +// - `ACPOpt` +// - `NodeOpt` +// - `StoreOpt` +// - `db.Option` +// - `http.ServerOpt` +// - `net.NodeOpt` +type Option any + // Options contains start configuration values. type Options struct { - storeOpts []StoreOpt - dbOpts []db.Option - netOpts []net.NodeOpt - serverOpts []http.ServerOpt - acpOpts []ACPOpt peers []peer.AddrInfo disableP2P bool disableAPI bool @@ -47,41 +53,6 @@ func DefaultOptions() *Options { // NodeOpt is a function for setting configuration values. type NodeOpt func(*Options) -// WithStoreOpts sets the store options. 
-func WithStoreOpts(opts ...StoreOpt) NodeOpt { - return func(o *Options) { - o.storeOpts = opts - } -} - -// WithACPOpts sets the ACP options. -func WithACPOpts(opts ...ACPOpt) NodeOpt { - return func(o *Options) { - o.acpOpts = opts - } -} - -// WithDatabaseOpts sets the database options. -func WithDatabaseOpts(opts ...db.Option) NodeOpt { - return func(o *Options) { - o.dbOpts = opts - } -} - -// WithNetOpts sets the net / p2p options. -func WithNetOpts(opts ...net.NodeOpt) NodeOpt { - return func(o *Options) { - o.netOpts = opts - } -} - -// WithServerOpts sets the api server options. -func WithServerOpts(opts ...http.ServerOpt) NodeOpt { - return func(o *Options) { - o.serverOpts = opts - } -} - // WithDisableP2P sets the disable p2p flag. func WithDisableP2P(disable bool) NodeOpt { return func(o *Options) { @@ -111,23 +82,49 @@ type Node struct { } // NewNode returns a new node instance configured with the given options. -func NewNode(ctx context.Context, opts ...NodeOpt) (*Node, error) { +func NewNode(ctx context.Context, opts ...Option) (*Node, error) { + var ( + dbOpts []db.Option + acpOpts []ACPOpt + netOpts []net.NodeOpt + storeOpts []StoreOpt + serverOpts []http.ServerOpt + ) + options := DefaultOptions() for _, opt := range opts { - opt(options) + switch t := opt.(type) { + case ACPOpt: + acpOpts = append(acpOpts, t) + + case NodeOpt: + t(options) + + case StoreOpt: + storeOpts = append(storeOpts, t) + + case db.Option: + dbOpts = append(dbOpts, t) + + case http.ServerOpt: + serverOpts = append(serverOpts, t) + + case net.NodeOpt: + netOpts = append(netOpts, t) + } } - rootstore, err := NewStore(ctx, options.storeOpts...) + rootstore, err := NewStore(ctx, storeOpts...) if err != nil { return nil, err } - acp, err := NewACP(ctx, options.acpOpts...) + acp, err := NewACP(ctx, acpOpts...) if err != nil { return nil, err } - db, err := db.NewDB(ctx, rootstore, acp, options.dbOpts...) + db, err := db.NewDB(ctx, rootstore, acp, dbOpts...) 
if err != nil { return nil, err } @@ -135,7 +132,7 @@ func NewNode(ctx context.Context, opts ...NodeOpt) (*Node, error) { var node *net.Node if !options.disableP2P { // setup net node - node, err = net.NewNode(ctx, db, options.netOpts...) + node, err = net.NewNode(ctx, db, netOpts...) if err != nil { return nil, err } @@ -156,7 +153,7 @@ func NewNode(ctx context.Context, opts ...NodeOpt) (*Node, error) { if err != nil { return nil, err } - server, err = http.NewServer(handler, options.serverOpts...) + server, err = http.NewServer(handler, serverOpts...) if err != nil { return nil, err } diff --git a/node/node_test.go b/node/node_test.go index 6da528f999..d3bf4c5048 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -11,51 +11,13 @@ package node import ( - "context" "testing" - "time" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/http" - "github.com/sourcenetwork/defradb/internal/db" - "github.com/sourcenetwork/defradb/net" ) -func TestWithStoreOpts(t *testing.T) { - storeOpts := []StoreOpt{WithPath("test")} - - options := &Options{} - WithStoreOpts(storeOpts...)(options) - assert.Equal(t, storeOpts, options.storeOpts) -} - -func TestWithDatabaseOpts(t *testing.T) { - dbOpts := []db.Option{db.WithMaxRetries(10)} - - options := &Options{} - WithDatabaseOpts(dbOpts...)(options) - assert.Equal(t, dbOpts, options.dbOpts) -} - -func TestWithNetOpts(t *testing.T) { - netOpts := []net.NodeOpt{net.WithEnablePubSub(true)} - - options := &Options{} - WithNetOpts(netOpts...)(options) - assert.Equal(t, netOpts, options.netOpts) -} - -func TestWithServerOpts(t *testing.T) { - serverOpts := []http.ServerOpt{http.WithAddress("127.0.0.1:8080")} - - options := &Options{} - WithServerOpts(serverOpts...)(options) - assert.Equal(t, serverOpts, options.serverOpts) -} - func TestWithDisableP2P(t *testing.T) { options := &Options{} WithDisableP2P(true)(options) @@ -78,24 +40,3 
@@ func TestWithPeers(t *testing.T) { require.Len(t, options.peers, 1) assert.Equal(t, *peer, options.peers[0]) } - -func TestNodeStart(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - opts := []NodeOpt{ - WithStoreOpts(WithPath(t.TempDir())), - WithDatabaseOpts(db.WithUpdateEvents()), - } - - node, err := NewNode(ctx, opts...) - require.NoError(t, err) - - err = node.Start(ctx) - require.NoError(t, err) - - <-time.After(5 * time.Second) - - err = node.Close(ctx) - require.NoError(t, err) -} diff --git a/tests/integration/db.go b/tests/integration/db.go index d8dedbc07c..db2217a04d 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -71,9 +71,9 @@ func init() { } func NewBadgerMemoryDB(ctx context.Context) (client.DB, error) { - opts := []node.NodeOpt{ - node.WithStoreOpts(node.WithInMemory(true)), - node.WithDatabaseOpts(db.WithUpdateEvents()), + opts := []node.Option{ + node.WithInMemory(true), + db.WithUpdateEvents(), } node, err := node.NewNode(ctx, opts...) @@ -87,8 +87,8 @@ func NewBadgerMemoryDB(ctx context.Context) (client.DB, error) { func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, error) { path := t.TempDir() - opts := []node.NodeOpt{ - node.WithStoreOpts(node.WithPath(path)), + opts := []node.Option{ + node.WithPath(path), } node, err := node.NewNode(ctx, opts...) @@ -103,13 +103,9 @@ func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, error) { // testing state. The database type on the test state is used to // select the datastore implementation to use. func setupDatabase(s *state) (client.DB, string, error) { - dbOpts := []db.Option{ + opts := []node.Option{ db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize), - } - storeOpts := []node.StoreOpt{} - acpOpts := []node.ACPOpt{} - opts := []node.NodeOpt{ // The test framework sets this up elsewhere when required so that it may be wrapped // into a [client.DB]. 
node.WithDisableAPI(true), @@ -127,13 +123,13 @@ func setupDatabase(s *state) (client.DB, string, error) { } if encryptionKey != nil { - storeOpts = append(storeOpts, node.WithEncryptionKey(encryptionKey)) + opts = append(opts, node.WithEncryptionKey(encryptionKey)) } var path string switch s.dbt { case badgerIMType: - storeOpts = append(storeOpts, node.WithInMemory(true)) + opts = append(opts, node.WithInMemory(true)) case badgerFileType: switch { @@ -150,20 +146,15 @@ func setupDatabase(s *state) (client.DB, string, error) { path = s.t.TempDir() } - storeOpts = append(storeOpts, node.WithPath(path)) - acpOpts = append(acpOpts, node.WithACPPath(path)) + opts = append(opts, node.WithPath(path), node.WithACPPath(path)) case defraIMType: - storeOpts = append(storeOpts, node.WithDefraStore(true)) + opts = append(opts, node.WithDefraStore(true)) default: return nil, "", fmt.Errorf("invalid database type: %v", s.dbt) } - opts = append(opts, node.WithDatabaseOpts(dbOpts...)) - opts = append(opts, node.WithStoreOpts(storeOpts...)) - opts = append(opts, node.WithACPOpts(acpOpts...)) - node, err := node.NewNode(s.ctx, opts...) if err != nil { return nil, "", err From 761243e886b97d0e3b83e695871eb038c09a4c13 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 28 May 2024 17:15:04 -0400 Subject: [PATCH 21/78] ci(i): Add workflow to ensure cli docs are in sync (#2647) ## Relevant issue(s) Resolves #2165 ## Description - Detects if cli documents need updating. - If Documentation is up to date the action passes, else fails. - Run `make docs` or `make docs:cli` to generate the documentation. - Introducing as a non-required check, but would argue it should be required. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. 
- [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? - Manually testing through action run and by using `act` utility. - Failed action: https://github.com/sourcenetwork/defradb/actions/runs/9214068142/job/25349425467?pr=2647 Specify the platform(s) on which this was tested: - WSL2 --- .github/workflows/check-cli-documentation.yml | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 .github/workflows/check-cli-documentation.yml diff --git a/.github/workflows/check-cli-documentation.yml b/.github/workflows/check-cli-documentation.yml new file mode 100644 index 0000000000..06a7bd7bdc --- /dev/null +++ b/.github/workflows/check-cli-documentation.yml @@ -0,0 +1,61 @@ +# Copyright 2024 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow checks that all CLI documentation is up to date. +# If the documentation is not up to date then this action will fail. 
+name: Check CLI Documentation Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + check-cli-documentation: + name: Check cli documentation job + + runs-on: ubuntu-latest + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + # This check is there as a safety to ensure we start clean (without any changes). + # If there are ever changes here, the rest of the job will output false result. + - name: Check no changes exist initially + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true + files: | + docs/website/references/cli + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + + - name: Try generating cli documentation + run: make docs:cli + + - name: Check no new changes exist + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true + files: | + docs/website/references/cli From 5829f9daad2b4974ae0c214cb05ac42995e5b303 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Fri, 31 May 2024 15:25:43 -0400 Subject: [PATCH 22/78] refactor: Extract Defra specific logic from ACPLocal type (#2656) ## Relevant issue(s) Resolves #2655 ## Description Extracts Defra specific logic from the ACPLocal type so that it may be re-used for SourceHub ACP. None of the code has changed, it's just moved around. 
--- acp/acp_local.go | 227 ++++++--------------------- acp/acp_local_test.go | 20 +-- acp/source_hub_client.go | 323 +++++++++++++++++++++++++++++++++++++++ net/peer_test.go | 4 +- node/acp.go | 8 +- 5 files changed, 386 insertions(+), 196 deletions(-) create mode 100644 acp/source_hub_client.go diff --git a/acp/acp_local.go b/acp/acp_local.go index e569efd5d0..51e071604a 100644 --- a/acp/acp_local.go +++ b/acp/acp_local.go @@ -14,17 +14,9 @@ import ( "context" protoTypes "github.com/cosmos/gogoproto/types" - "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/sourcehub/x/acp/embedded" "github.com/sourcenetwork/sourcehub/x/acp/types" - "github.com/valyala/fastjson" - - "github.com/sourcenetwork/defradb/errors" -) - -var ( - _ ACP = (*ACPLocal)(nil) ) // ACPLocal represents a local acp implementation that makes no remote calls. @@ -33,6 +25,8 @@ type ACPLocal struct { localACP *embedded.LocalACP } +var _ sourceHubClient = (*ACPLocal)(nil) + func (l *ACPLocal) Init(ctx context.Context, path string) { if path == "" { l.pathToStore = immutable.None[string]() @@ -75,22 +69,9 @@ func (l *ACPLocal) AddPolicy( ctx context.Context, creatorID string, policy string, + policyMarshalType types.PolicyMarshalingType, + creationTime *protoTypes.Timestamp, ) (string, error) { - // Having a creator identity is a MUST requirement for adding a policy. - if creatorID == "" { - return "", ErrPolicyCreatorMustNotBeEmpty - } - - if policy == "" { - return "", ErrPolicyDataMustNotBeEmpty - } - - // Assume policy is in YAML format by default. - policyMarshalType := types.PolicyMarshalingType_SHORT_YAML - if isJSON := fastjson.Validate(policy) == nil; isJSON { // Detect JSON format. 
- policyMarshalType = types.PolicyMarshalingType_SHORT_JSON - } - createPolicy := types.MsgCreatePolicy{ Creator: creatorID, Policy: policy, @@ -102,160 +83,68 @@ func (l *ACPLocal) AddPolicy( l.localACP.GetCtx(), &createPolicy, ) - if err != nil { - return "", NewErrFailedToAddPolicyWithACP(err, "Local", creatorID) + return "", err } - policyID := createPolicyResponse.Policy.Id - log.InfoContext(ctx, "Created Policy", corelog.Any("PolicyID", policyID)) - - return policyID, nil + return createPolicyResponse.Policy.Id, nil } -func (l *ACPLocal) ValidateResourceExistsOnValidDPI( +func (l *ACPLocal) Policy( ctx context.Context, policyID string, - resourceName string, -) error { - if policyID == "" && resourceName == "" { - return ErrNoPolicyArgs - } - - if policyID == "" { - return ErrPolicyIDMustNotBeEmpty - } - - if resourceName == "" { - return ErrResourceNameMustNotBeEmpty - } - - queryPolicyRequest := types.QueryPolicyRequest{Id: policyID} +) (*types.Policy, error) { queryPolicyResponse, err := l.localACP.GetQueryService().Policy( l.localACP.GetCtx(), - &queryPolicyRequest, + &types.QueryPolicyRequest{Id: policyID}, ) - if err != nil { - if errors.Is(err, types.ErrPolicyNotFound) { - return newErrPolicyDoesNotExistWithACP(err, policyID) - } else { - return newErrPolicyValidationFailedWithACP(err, policyID) - } - } - - // So far we validated that the policy exists, now lets validate that resource exists. - resourceResponse := queryPolicyResponse.Policy.GetResourceByName(resourceName) - if resourceResponse == nil { - return newErrResourceDoesNotExistOnTargetPolicy(resourceName, policyID) + return nil, err } - // Now that we have validated that policyID exists and it contains a corresponding - // resource with the matching name, validate that all required permissions - // for DPI actually exist on the target resource. 
- for _, requiredPermission := range dpiRequiredPermissions { - permissionResponse := resourceResponse.GetPermissionByName(requiredPermission) - if permissionResponse == nil { - return newErrResourceIsMissingRequiredPermission( - resourceName, - requiredPermission, - policyID, - ) - } - - // Now we need to ensure that the "owner" relation has access to all the required - // permissions for DPI. This is important because even if the policy has the required - // permissions under the resource, it's possible that those permissions are not granted - // to the "owner" relation, this will help users not shoot themseleves in the foot. - // TODO-ACP: Better validation, once sourcehub implements meta-policies. - // Issue: https://github.com/sourcenetwork/defradb/issues/2359 - if err := validateDPIExpressionOfRequiredPermission( - permissionResponse.Expression, - requiredPermission, - ); err != nil { - return err - } - } - - return nil + return queryPolicyResponse.Policy, nil } -func (l *ACPLocal) RegisterDocObject( +func (l *ACPLocal) RegisterObject( ctx context.Context, actorID string, policyID string, resourceName string, - docID string, -) error { - registerDoc := types.MsgRegisterObject{ - Creator: actorID, - PolicyId: policyID, - Object: types.NewObject(resourceName, docID), - CreationTime: protoTypes.TimestampNow(), - } - + objectID string, + creationTime *protoTypes.Timestamp, +) (types.RegistrationResult, error) { registerDocResponse, err := l.localACP.GetMsgService().RegisterObject( l.localACP.GetCtx(), - ®isterDoc, + &types.MsgRegisterObject{ + Creator: actorID, + PolicyId: policyID, + Object: types.NewObject(resourceName, objectID), + CreationTime: creationTime, + }, ) - if err != nil { - return NewErrFailedToRegisterDocWithACP(err, "Local", policyID, actorID, resourceName, docID) + return types.RegistrationResult(0), err } - switch registerDocResponse.Result { - case types.RegistrationResult_NoOp: - return ErrObjectDidNotRegister - - case 
types.RegistrationResult_Registered: - log.InfoContext( - ctx, - "Document registered with local acp", - corelog.Any("PolicyID", policyID), - corelog.Any("Creator", actorID), - corelog.Any("Resource", resourceName), - corelog.Any("DocID", docID), - ) - return nil - - case types.RegistrationResult_Unarchived: - log.InfoContext( - ctx, - "Document re-registered (unarchived object) with local acp", - corelog.Any("PolicyID", policyID), - corelog.Any("Creator", actorID), - corelog.Any("Resource", resourceName), - corelog.Any("DocID", docID), - ) - return nil - } - - return ErrObjectDidNotRegister + return registerDocResponse.Result, nil } -func (l *ACPLocal) IsDocRegistered( +func (l *ACPLocal) ObjectOwner( ctx context.Context, policyID string, resourceName string, - docID string, -) (bool, error) { - queryObjectOwner := types.QueryObjectOwnerRequest{ - PolicyId: policyID, - Object: types.NewObject(resourceName, docID), - } - - queryObjectOwnerResponse, err := l.localACP.GetQueryService().ObjectOwner( + objectID string, +) (*types.QueryObjectOwnerResponse, error) { + return l.localACP.GetQueryService().ObjectOwner( l.localACP.GetCtx(), - &queryObjectOwner, + &types.QueryObjectOwnerRequest{ + PolicyId: policyID, + Object: types.NewObject(resourceName, objectID), + }, ) - if err != nil { - return false, NewErrFailedToCheckIfDocIsRegisteredWithACP(err, "Local", policyID, resourceName, docID) - } - - return queryObjectOwnerResponse.IsRegistered, nil } -func (l *ACPLocal) CheckDocAccess( +func (l *ACPLocal) VerifyAccessRequest( ctx context.Context, permission DPIPermission, actorID string, @@ -263,48 +152,26 @@ func (l *ACPLocal) CheckDocAccess( resourceName string, docID string, ) (bool, error) { - checkDoc := types.QueryVerifyAccessRequestRequest{ - PolicyId: policyID, - AccessRequest: &types.AccessRequest{ - Operations: []*types.Operation{ - { - Object: types.NewObject(resourceName, docID), - Permission: permission.String(), + checkDocResponse, err := 
l.localACP.GetQueryService().VerifyAccessRequest( + l.localACP.GetCtx(), + &types.QueryVerifyAccessRequestRequest{ + PolicyId: policyID, + AccessRequest: &types.AccessRequest{ + Operations: []*types.Operation{ + { + Object: types.NewObject(resourceName, docID), + Permission: permission.String(), + }, + }, + Actor: &types.Actor{ + Id: actorID, }, - }, - Actor: &types.Actor{ - Id: actorID, }, }, - } - - checkDocResponse, err := l.localACP.GetQueryService().VerifyAccessRequest( - l.localACP.GetCtx(), - &checkDoc, ) if err != nil { - return false, NewErrFailedToVerifyDocAccessWithACP(err, "Local", policyID, actorID, resourceName, docID) + return false, err } - if checkDocResponse.Valid { - log.InfoContext( - ctx, - "Document accessible", - corelog.Any("PolicyID", policyID), - corelog.Any("ActorID", actorID), - corelog.Any("Resource", resourceName), - corelog.Any("DocID", docID), - ) - return true, nil - } else { - log.InfoContext( - ctx, - "Document inaccessible", - corelog.Any("PolicyID", policyID), - corelog.Any("ActorID", actorID), - corelog.Any("Resource", resourceName), - corelog.Any("DocID", docID), - ) - return false, nil - } + return checkDocResponse.Valid, nil } diff --git a/acp/acp_local_test.go b/acp/acp_local_test.go index 9abdcb04d1..ed613a829c 100644 --- a/acp/acp_local_test.go +++ b/acp/acp_local_test.go @@ -46,7 +46,7 @@ resources: func Test_LocalACP_InMemory_StartAndClose_NoError(t *testing.T) { ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, "") err := localACP.Start(ctx) @@ -62,7 +62,7 @@ func Test_LocalACP_PersistentMemory_StartAndClose_NoError(t *testing.T) { require.NotEqual(t, "", acpPath) ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, acpPath) err := localACP.Start(ctx) @@ -74,7 +74,7 @@ func Test_LocalACP_PersistentMemory_StartAndClose_NoError(t *testing.T) { func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) { ctx := 
context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, "") errStart := localACP.Start(ctx) @@ -123,7 +123,7 @@ func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) { require.NotEqual(t, "", acpPath) ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, acpPath) errStart := localACP.Start(ctx) @@ -165,7 +165,7 @@ func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) { func Test_LocalACP_InMemory_ValidateResourseExistsOrNot_ErrIfDoesntExist(t *testing.T) { ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, "") errStart := localACP.Start(ctx) @@ -215,7 +215,7 @@ func Test_LocalACP_PersistentMemory_ValidateResourseExistsOrNot_ErrIfDoesntExist require.NotEqual(t, "", acpPath) ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, acpPath) errStart := localACP.Start(ctx) @@ -278,7 +278,7 @@ func Test_LocalACP_PersistentMemory_ValidateResourseExistsOrNot_ErrIfDoesntExist func Test_LocalACP_InMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAndErrorOtherwise(t *testing.T) { ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, "") errStart := localACP.Start(ctx) @@ -358,7 +358,7 @@ func Test_LocalACP_PersistentMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAn require.NotEqual(t, "", acpPath) ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, acpPath) errStart := localACP.Start(ctx) @@ -454,7 +454,7 @@ func Test_LocalACP_PersistentMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAn func Test_LocalACP_InMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErrorOtherwise(t *testing.T) { ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, "") errStart := localACP.Start(ctx) @@ -540,7 +540,7 @@ func 
Test_LocalACP_PersistentMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErr require.NotEqual(t, "", acpPath) ctx := context.Background() - var localACP ACPLocal + localACP := NewLocalACP() localACP.Init(ctx, acpPath) errStart := localACP.Start(ctx) diff --git a/acp/source_hub_client.go b/acp/source_hub_client.go new file mode 100644 index 0000000000..b41544d68f --- /dev/null +++ b/acp/source_hub_client.go @@ -0,0 +1,323 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package acp + +import ( + "context" + + protoTypes "github.com/cosmos/gogoproto/types" + "github.com/sourcenetwork/corelog" + "github.com/sourcenetwork/sourcehub/x/acp/types" + "github.com/valyala/fastjson" + + "github.com/sourcenetwork/defradb/errors" +) + +// sourceHubClient is a private abstraction to allow multiple ACP implementations +// based off of the SourceHub libraries to share the same Defra-specific logic via the +// sourceHubBridge. +type sourceHubClient interface { + // Init initializes the acp, with an absolute path. The provided path indicates where the + // persistent data will be stored for acp. + // + // If the path is empty then acp will run in memory. + Init(ctx context.Context, path string) + + // Start starts the acp, using the initialized path. Will recover acp state + // from a previous run if under the same path. + // + // If the path is empty then acp will run in memory. + Start(ctx context.Context) error + + // AddPolicy attempts to add the given policy. Upon success a policyID is returned, + // otherwise returns error. 
+ AddPolicy( + ctx context.Context, + creatorID string, + policy string, + policyMarshalingType types.PolicyMarshalingType, + creationTime *protoTypes.Timestamp, + ) (string, error) + + // Policy returns a policy of the given policyID if one is found. + Policy( + ctx context.Context, + policyID string, + ) (*types.Policy, error) + + // RegisterObject registers the object to have access control. + // No error is returned upon successful registering of an object. + RegisterObject( + ctx context.Context, + actorID string, + policyID string, + resourceName string, + objectID string, + creationTime *protoTypes.Timestamp, + ) (types.RegistrationResult, error) + + // ObjectOwner returns the owner of the object of the given objectID. + ObjectOwner( + ctx context.Context, + policyID string, + resourceName string, + objectID string, + ) (*types.QueryObjectOwnerResponse, error) + + // VerifyAccessRequest returns true if the check was successfull and the request has access to the object. If + // the check was successful but the request does not have access to the object, then returns false. + // Otherwise if check failed then an error is returned (and the boolean result should not be used). + VerifyAccessRequest( + ctx context.Context, + permission DPIPermission, + actorID string, + policyID string, + resourceName string, + docID string, + ) (bool, error) + + // Close closes any resources in use by acp. + Close() error +} + +// sourceHubBridge wraps a sourceHubClient, hosting the Defra-specific logic away from client-specific +// code. 
+type sourceHubBridge struct { + client sourceHubClient +} + +var _ ACP = (*sourceHubBridge)(nil) + +func NewLocalACP() ACP { + return &sourceHubBridge{ + client: &ACPLocal{}, + } +} + +func (a *sourceHubBridge) Init(ctx context.Context, path string) { + a.client.Init(ctx, path) +} + +func (a *sourceHubBridge) Start(ctx context.Context) error { + return a.client.Start(ctx) +} + +func (a *sourceHubBridge) AddPolicy(ctx context.Context, creatorID string, policy string) (string, error) { + // Having a creator identity is a MUST requirement for adding a policy. + if creatorID == "" { + return "", ErrPolicyCreatorMustNotBeEmpty + } + + if policy == "" { + return "", ErrPolicyDataMustNotBeEmpty + } + + // Assume policy is in YAML format by default. + policyMarshalType := types.PolicyMarshalingType_SHORT_YAML + if isJSON := fastjson.Validate(policy) == nil; isJSON { // Detect JSON format. + policyMarshalType = types.PolicyMarshalingType_SHORT_JSON + } + + policyID, err := a.client.AddPolicy( + ctx, + creatorID, + policy, + policyMarshalType, + protoTypes.TimestampNow(), + ) + + if err != nil { + return "", NewErrFailedToAddPolicyWithACP(err, "Local", creatorID) + } + + log.InfoContext(ctx, "Created Policy", corelog.Any("PolicyID", policyID)) + + return policyID, nil +} + +func (a *sourceHubBridge) ValidateResourceExistsOnValidDPI( + ctx context.Context, + policyID string, + resourceName string, +) error { + if policyID == "" && resourceName == "" { + return ErrNoPolicyArgs + } + + if policyID == "" { + return ErrPolicyIDMustNotBeEmpty + } + + if resourceName == "" { + return ErrResourceNameMustNotBeEmpty + } + + policy, err := a.client.Policy(ctx, policyID) + + if err != nil { + if errors.Is(err, types.ErrPolicyNotFound) { + return newErrPolicyDoesNotExistWithACP(err, policyID) + } else { + return newErrPolicyValidationFailedWithACP(err, policyID) + } + } + + // So far we validated that the policy exists, now lets validate that resource exists. 
+ resourceResponse := policy.GetResourceByName(resourceName) + if resourceResponse == nil { + return newErrResourceDoesNotExistOnTargetPolicy(resourceName, policyID) + } + + // Now that we have validated that policyID exists and it contains a corresponding + // resource with the matching name, validate that all required permissions + // for DPI actually exist on the target resource. + for _, requiredPermission := range dpiRequiredPermissions { + permissionResponse := resourceResponse.GetPermissionByName(requiredPermission) + if permissionResponse == nil { + return newErrResourceIsMissingRequiredPermission( + resourceName, + requiredPermission, + policyID, + ) + } + + // Now we need to ensure that the "owner" relation has access to all the required + // permissions for DPI. This is important because even if the policy has the required + // permissions under the resource, it's possible that those permissions are not granted + // to the "owner" relation, this will help users not shoot themseleves in the foot. + // TODO-ACP: Better validation, once sourcehub implements meta-policies. 
+ // Issue: https://github.com/sourcenetwork/defradb/issues/2359 + if err := validateDPIExpressionOfRequiredPermission( + permissionResponse.Expression, + requiredPermission, + ); err != nil { + return err + } + } + + return nil +} + +func (a *sourceHubBridge) RegisterDocObject( + ctx context.Context, + actorID string, + policyID string, + resourceName string, + docID string, +) error { + registerDocResult, err := a.client.RegisterObject( + ctx, + actorID, + policyID, + resourceName, + docID, + protoTypes.TimestampNow(), + ) + + if err != nil { + return NewErrFailedToRegisterDocWithACP(err, "Local", policyID, actorID, resourceName, docID) + } + + switch registerDocResult { + case types.RegistrationResult_NoOp: + return ErrObjectDidNotRegister + + case types.RegistrationResult_Registered: + log.InfoContext( + ctx, + "Document registered with local acp", + corelog.Any("PolicyID", policyID), + corelog.Any("Creator", actorID), + corelog.Any("Resource", resourceName), + corelog.Any("DocID", docID), + ) + return nil + + case types.RegistrationResult_Unarchived: + log.InfoContext( + ctx, + "Document re-registered (unarchived object) with local acp", + corelog.Any("PolicyID", policyID), + corelog.Any("Creator", actorID), + corelog.Any("Resource", resourceName), + corelog.Any("DocID", docID), + ) + return nil + } + + return ErrObjectDidNotRegister +} + +func (a *sourceHubBridge) IsDocRegistered( + ctx context.Context, + policyID string, + resourceName string, + docID string, +) (bool, error) { + queryObjectOwnerResponse, err := a.client.ObjectOwner( + ctx, + policyID, + resourceName, + docID, + ) + if err != nil { + return false, NewErrFailedToCheckIfDocIsRegisteredWithACP(err, "Local", policyID, resourceName, docID) + } + + return queryObjectOwnerResponse.IsRegistered, nil +} + +func (a *sourceHubBridge) CheckDocAccess( + ctx context.Context, + permission DPIPermission, + actorID string, + policyID string, + resourceName string, + docID string, +) (bool, error) { + 
isValid, err := a.client.VerifyAccessRequest( + ctx, + permission, + actorID, + policyID, + resourceName, + docID, + ) + if err != nil { + return false, NewErrFailedToVerifyDocAccessWithACP(err, "Local", policyID, actorID, resourceName, docID) + } + + if isValid { + log.InfoContext( + ctx, + "Document accessible", + corelog.Any("PolicyID", policyID), + corelog.Any("ActorID", actorID), + corelog.Any("Resource", resourceName), + corelog.Any("DocID", docID), + ) + return true, nil + } else { + log.InfoContext( + ctx, + "Document inaccessible", + corelog.Any("PolicyID", policyID), + corelog.Any("ActorID", actorID), + corelog.Any("Resource", resourceName), + corelog.Any("DocID", docID), + ) + return false, nil + } +} + +func (a *sourceHubBridge) Close() error { + return a.client.Close() +} diff --git a/net/peer_test.go b/net/peer_test.go index 41977b4664..fdd1feb583 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -71,9 +71,9 @@ const randomMultiaddr = "/ip4/127.0.0.1/tcp/0" func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { store := memory.NewDatastore(ctx) - var acpLocal acp.ACPLocal + acpLocal := acp.NewLocalACP() acpLocal.Init(context.Background(), "") - db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](&acpLocal), db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](acpLocal), db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( diff --git a/node/acp.go b/node/acp.go index 77b14cc0e9..0df33269e9 100644 --- a/node/acp.go +++ b/node/acp.go @@ -71,13 +71,13 @@ func NewACP(ctx context.Context, opts ...ACPOpt) (immutable.Option[acp.ACP], err return acp.NoACP, nil case LocalACPType: - var acpLocal acp.ACPLocal + acpLocal := acp.NewLocalACP() acpLocal.Init(ctx, options.path) - return immutable.Some[acp.ACP](&acpLocal), nil + return immutable.Some[acp.ACP](acpLocal), nil default: - var acpLocal acp.ACPLocal + acpLocal := acp.NewLocalACP() acpLocal.Init(ctx, options.path) - return 
immutable.Some[acp.ACP](&acpLocal), nil + return immutable.Some[acp.ACP](acpLocal), nil } } From c5ce2f3b0156509dde3f47ac783c921844236f06 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Fri, 31 May 2024 15:13:56 -0700 Subject: [PATCH 23/78] feat: Add authentication for ACP (#2649) ## Relevant issue(s) Resolves #2017 ## Description This PR adds ACP identity authentication via HTTP. Notable changes: - `acp/identity` has been replaced with the `acp.Identity` struct - `identity.PrivateKey` is the private key of the identity - `identity.PublicKey` is the public key of the identity - `identity.Address` is the bech32 formatted address for the identity - keys are all `secp256k1` - `http` can authenticate requests using a jwt bearer token - ~a random `audience` value is generated on every http server startup~ - audience must be set to the defradb host name - ~api route `/audience` returns the random audience value~ - `http.Client` will create a signed token if an `acp.PrivateKeyIdentity` is set - jwt token subject is the identity public key - `cli` `--identity` flag is now a hex encoded private key Todo: - [x] ensure acp docs are updated ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
`make test` Specify the platform(s) on which this was tested: - MacOS --- acp/README.md | 67 ++++++--- acp/identity/identity.go | 63 +++++--- cli/acp_policy_add.go | 9 +- cli/client.go | 3 +- cli/collection.go | 3 +- cli/collection_create.go | 3 +- cli/collection_delete.go | 3 +- cli/collection_get.go | 2 +- cli/collection_list_doc_ids.go | 2 +- cli/collection_update.go | 2 +- cli/request.go | 2 +- cli/utils.go | 14 +- crypto/crypto.go | 18 ++- docs/website/references/cli/defradb_client.md | 2 +- .../references/cli/defradb_client_acp.md | 2 +- .../cli/defradb_client_acp_policy.md | 2 +- .../cli/defradb_client_acp_policy_add.md | 11 +- .../references/cli/defradb_client_backup.md | 2 +- .../cli/defradb_client_backup_export.md | 2 +- .../cli/defradb_client_backup_import.md | 2 +- .../cli/defradb_client_collection.md | 2 +- .../cli/defradb_client_collection_create.md | 5 +- .../cli/defradb_client_collection_delete.md | 5 +- .../cli/defradb_client_collection_describe.md | 2 +- .../cli/defradb_client_collection_docIDs.md | 4 +- .../cli/defradb_client_collection_get.md | 4 +- .../cli/defradb_client_collection_patch.md | 2 +- .../cli/defradb_client_collection_update.md | 4 +- .../references/cli/defradb_client_dump.md | 2 +- .../references/cli/defradb_client_index.md | 2 +- .../cli/defradb_client_index_create.md | 2 +- .../cli/defradb_client_index_drop.md | 2 +- .../cli/defradb_client_index_list.md | 2 +- .../references/cli/defradb_client_p2p.md | 2 +- .../cli/defradb_client_p2p_collection.md | 2 +- .../cli/defradb_client_p2p_collection_add.md | 2 +- .../defradb_client_p2p_collection_getall.md | 2 +- .../defradb_client_p2p_collection_remove.md | 2 +- .../references/cli/defradb_client_p2p_info.md | 2 +- .../cli/defradb_client_p2p_replicator.md | 2 +- .../defradb_client_p2p_replicator_delete.md | 2 +- .../defradb_client_p2p_replicator_getall.md | 2 +- .../cli/defradb_client_p2p_replicator_set.md | 2 +- .../references/cli/defradb_client_query.md | 4 +- 
.../references/cli/defradb_client_schema.md | 2 +- .../cli/defradb_client_schema_add.md | 2 +- .../cli/defradb_client_schema_describe.md | 2 +- .../cli/defradb_client_schema_migration.md | 2 +- .../defradb_client_schema_migration_down.md | 2 +- .../defradb_client_schema_migration_reload.md | 2 +- ...db_client_schema_migration_set-registry.md | 2 +- .../defradb_client_schema_migration_set.md | 2 +- .../cli/defradb_client_schema_migration_up.md | 2 +- .../cli/defradb_client_schema_patch.md | 2 +- .../cli/defradb_client_schema_set-active.md | 2 +- .../references/cli/defradb_client_tx.md | 2 +- .../cli/defradb_client_tx_commit.md | 2 +- .../cli/defradb_client_tx_create.md | 2 +- .../cli/defradb_client_tx_discard.md | 2 +- .../references/cli/defradb_client_view.md | 2 +- .../references/cli/defradb_client_view_add.md | 2 +- examples/schema/permissioned/users.graphql | 2 +- go.mod | 48 +++--- go.sum | 95 ++++++------ http/auth.go | 115 ++++++++++++++ http/auth_test.go | 140 ++++++++++++++++++ http/client.go | 6 +- http/client_collection.go | 5 +- http/errors.go | 20 +-- http/handler.go | 4 +- http/http_client.go | 27 +++- http/middleware.go | 27 ---- http/openapi.go | 14 +- http/utils.go | 10 +- internal/db/db.go | 4 +- internal/db/fetcher/mocks/fetcher.go | 2 +- internal/db/permission/check.go | 2 +- internal/db/permission/register.go | 2 +- net/peer_test.go | 32 +++- tests/clients/cli/wrapper_cli.go | 5 +- tests/integration/acp.go | 4 +- .../integration/acp/add_policy/basic_test.go | 4 +- .../acp/add_policy/with_empty_args_test.go | 7 +- .../with_extra_perms_and_relations_test.go | 2 +- .../acp/add_policy/with_extra_perms_test.go | 2 +- .../add_policy/with_extra_relations_test.go | 2 +- .../with_invalid_creator_arg_test.go | 17 ++- .../add_policy/with_managed_relation_test.go | 2 +- .../add_policy/with_multi_policies_test.go | 20 +-- .../with_multiple_resources_test.go | 2 +- .../acp/add_policy/with_no_perms_test.go | 8 +- .../acp/add_policy/with_no_resources_test.go | 
6 +- .../acp/add_policy/with_perm_expr_test.go | 4 +- .../with_permissionless_owner_test.go | 6 +- .../add_policy/with_unused_relations_test.go | 2 +- tests/integration/acp/fixture.go | 23 ++- tests/integration/acp/index/create_test.go | 8 +- tests/integration/acp/index/fixture.go | 4 +- tests/integration/acp/index/query_test.go | 12 +- .../acp/index/query_with_relation_test.go | 6 +- tests/integration/acp/p2p/replicator_test.go | 4 +- tests/integration/acp/p2p/subscribe_test.go | 4 +- tests/integration/acp/query/fixture.go | 6 +- .../acp/register_and_delete_test.go | 20 +-- .../integration/acp/register_and_read_test.go | 20 +-- .../acp/register_and_update_test.go | 28 ++-- .../add_dpi/accept_basic_dpi_fmts_test.go | 4 +- .../accept_extra_permissions_on_dpi_test.go | 6 +- .../accept_managed_relation_on_dpi_test.go | 2 +- ...ept_mixed_resources_on_partial_dpi_test.go | 2 +- .../schema/add_dpi/accept_multi_dpis_test.go | 4 +- .../accept_multi_resources_on_dpi_test.go | 4 +- ...cept_same_resource_on_diff_schemas_test.go | 2 +- .../reject_empty_arg_on_schema_test.go | 4 +- .../reject_invalid_arg_type_on_schema_test.go | 4 +- ...ect_invalid_owner_read_perm_on_dpi_test.go | 10 +- ...alid_owner_read_perm_symbol_on_dpi_test.go | 6 +- ...ct_invalid_owner_write_perm_on_dpi_test.go | 10 +- ...lid_owner_write_perm_symbol_on_dpi_test.go | 6 +- .../schema/add_dpi/reject_missing_dpi_test.go | 6 +- .../reject_missing_id_arg_on_schema_test.go | 4 +- .../reject_missing_perms_on_dpi_test.go | 2 +- ...ect_missing_resource_arg_on_schema_test.go | 4 +- .../reject_missing_resource_on_dpi_test.go | 2 +- ...ect_mixed_resources_on_partial_dpi_test.go | 2 +- .../updates/remove/policy_test.go | 4 +- tests/integration/test_case.go | 9 +- tests/integration/utils2.go | 19 ++- 128 files changed, 803 insertions(+), 406 deletions(-) create mode 100644 http/auth.go create mode 100644 http/auth_test.go diff --git a/acp/README.md b/acp/README.md index 697a60a0c2..29244103fa 100644 --- a/acp/README.md 
+++ b/acp/README.md @@ -145,9 +145,31 @@ Here are some valid expression examples. Assuming these `expr` are under a requi - `expr: owner +reader` - `expr: owner+reader` - ## DAC Usage CLI: +### Authentication + +To perform authenticated operations you will need to generate a `secp256k1` key pair. + +The command below will generate a new secp256k1 private key and print the 256 bit X coordinate as a hexadecimal value. + +```sh +openssl ecparam -name secp256k1 -genkey | openssl ec -text -noout | head -n5 | tail -n3 | tr -d '\n:\ ' +``` + +Copy the private key hex from the output. + +```sh +read EC key +e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac +``` + +Use the private key to generate authentication tokens for each request. + +```sh +defradb client ... --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac +``` + ### Adding a Policy: We have in `examples/dpi_policy/user_dpi_policy.yml`: @@ -176,14 +198,13 @@ resources: CLI Command: ```sh -defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f examples/dpi_policy/user_dpi_policy.yml - +defradb client acp policy add -f examples/dpi_policy/user_dpi_policy.yml --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Result: ```json { - "PolicyID": "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c" + "PolicyID": "50d354a91ab1b8fce8a0ae4693de7616fb1d82cfc540f25cfbe11eb0195a5765" } ``` @@ -192,7 +213,7 @@ Result: We have in `examples/schema/permissioned/users.graphql`: ```graphql type Users @policy( - id: "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c", + id: "50d354a91ab1b8fce8a0ae4693de7616fb1d82cfc540f25cfbe11eb0195a5765", resource: "users" ) { name: String @@ -230,7 +251,7 @@ Result: ], "Indexes": [], "Policy": { - "ID": "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c", + "ID": "50d354a91ab1b8fce8a0ae4693de7616fb1d82cfc540f25cfbe11eb0195a5765", "ResourceName": "users" } } @@ 
-242,7 +263,7 @@ Result: CLI Command: ```sh -defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users '[{ "name": "SecretShahzad" }, { "name": "SecretLone" }]' +defradb client collection create --name Users '[{ "name": "SecretShahzad" }, { "name": "SecretLone" }]' --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` ### Create public documents (without identity) @@ -255,7 +276,7 @@ defradb client collection create --name Users '[{ "name": "PublicShahzad" }, { ### Get all docIDs without an identity (shows only public): CLI Command: ```sh -defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j +defradb client collection docIDs --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Result: @@ -273,7 +294,7 @@ Result: ### Get all docIDs with an identity (shows public and owned documents): ```sh -defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j +defradb client collection docIDs --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Result: @@ -300,7 +321,7 @@ Result: ### Access the private document (including field names): CLI Command: ```sh -defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" +defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Result: @@ -325,7 +346,7 @@ Error: ### Accessing the private document with wrong identity: CLI Command: ```sh -defradb client collection get -i cosmos1x25hhksxhu86r45hqwk28dd70qzux3262hdrll --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" +defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity 4d092126012ebaf56161716018a71630d99443d9d5217e9d8502bb5c5456f2c5 ``` Error: @@ -336,7 +357,7 @@ Error: ### Update private 
document: CLI Command: ```sh -defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --updater '{ "name": "SecretUpdatedShahzad" }' +defradb client collection update --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --updater '{ "name": "SecretUpdatedShahzad" }' --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Result: @@ -352,7 +373,7 @@ Result: #### Check if it actually got updated: CLI Command: ```sh -defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" +defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Result: @@ -368,7 +389,7 @@ Result: ### Delete private document: CLI Command: ```sh -defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" +defradb client collection delete --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Result: @@ -384,7 +405,7 @@ Result: #### Check if it actually got deleted: CLI Command: ```sh -defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" +defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac ``` Error: @@ -408,9 +429,21 @@ Error: ## DAC Usage HTTP: -HTTP requests work similar to their CLI counter parts, the main difference is that the identity will just be specified within the Auth Header like so: `Authorization: Basic `. -Note: The `Basic` label will change to `Bearer ` after JWS Authentication Tokens are supported. 
+### Authentication + +To perform authenticated operations you will need to build and sign a JWT token with the following required fields: + +- `sub` public key of the identity +- `aud` host name of the defradb api + +> The `exp` and `nbf` fields should also be set to short-lived durations. + +The JWT must be signed with the `secp256k1` private key of the identity you wish to perform actions as. + +The signed token must be set on the `Authorization` header of the HTTP request with the `bearer ` prefix prepended to it. + +If authentication fails for any reason a `403` forbidden response will be returned. ## _AAC DPI Rules (coming soon)_ ## _AAC Usage: (coming soon)_ diff --git a/acp/identity/identity.go b/acp/identity/identity.go index 108c183748..66fec280db 100644 --- a/acp/identity/identity.go +++ b/acp/identity/identity.go @@ -8,34 +8,51 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -/* -Package identity provides defradb identity. -*/ - package identity -import "github.com/sourcenetwork/immutable" +import ( + cosmosSecp256k1 "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + "github.com/cosmos/cosmos-sdk/types" + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/sourcenetwork/immutable" +) -// Identity is the unique identifier for an actor. -type Identity string +// None specifies an anonymous actor. +var None = immutable.None[Identity]() -var ( - // None is an empty identity. - None = immutable.None[Identity]() -) +// Identity describes a unique actor. +type Identity struct { + // PublicKey is the actor's public key. + PublicKey *secp256k1.PublicKey + // PrivateKey is the actor's private key. + PrivateKey *secp256k1.PrivateKey + // Address is the actor's unique address. + // + // The address is derived from the actor's public key. + Address string +} + +// FromPrivateKey returns a new identity using the given private key. 
+func FromPrivateKey(privateKey *secp256k1.PrivateKey) immutable.Option[Identity] { + pubKey := privateKey.PubKey() + return immutable.Some(Identity{ + Address: AddressFromPublicKey(pubKey), + PublicKey: pubKey, + PrivateKey: privateKey, + }) +} -// New makes a new identity if the input is not empty otherwise, returns None. -func New(identity string) immutable.Option[Identity] { - // TODO-ACP: There will be more validation once sourcehub gets some utilities. - // Then a validation function would do the validation, will likely do outside this function. - // https://github.com/sourcenetwork/defradb/issues/2358 - if identity == "" { - return None - } - return immutable.Some(Identity(identity)) +// FromPublicKey returns a new identity using the given public key. +func FromPublicKey(publicKey *secp256k1.PublicKey) immutable.Option[Identity] { + return immutable.Some(Identity{ + Address: AddressFromPublicKey(publicKey), + PublicKey: publicKey, + }) } -// String returns the string representation of the identity. -func (i Identity) String() string { - return string(i) +// AddressFromPublicKey returns the unique address of the given public key. 
+func AddressFromPublicKey(publicKey *secp256k1.PublicKey) string { + pub := cosmosSecp256k1.PubKey{Key: publicKey.SerializeCompressed()} + // conversion from well known types should never cause a panic + return types.MustBech32ifyAddressBytes("cosmos", pub.Address().Bytes()) } diff --git a/cli/acp_policy_add.go b/cli/acp_policy_add.go index bca5e95abd..b0db4f63c0 100644 --- a/cli/acp_policy_add.go +++ b/cli/acp_policy_add.go @@ -37,7 +37,8 @@ Notes: - Learn more about [ACP & DPI Rules](/acp/README.md) Example: add from an argument string: - defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j ' + defradb client acp policy add -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f \ +' description: A Valid DefraDB Policy Interface actor: @@ -61,10 +62,12 @@ resources: ' Example: add from file: - defradb client acp policy add -i cosmos17r39df0hdcrgnmmw4mvu7qgk5nu888c7uvv37y -f policy.yml + defradb client acp policy add -f policy.yml \ + -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: add from file, verbose flags: - defradb client acp policy add --identity cosmos1kpw734v54g0t0d8tcye8ee5jc3gld0tcr2q473 --file policy.yml + defradb client acp policy add --file policy.yml \ + --identity 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: add from stdin: cat policy.yml | defradb client acp policy add - diff --git a/cli/client.go b/cli/client.go index 06460ca70d..d6ae8256dd 100644 --- a/cli/client.go +++ b/cli/client.go @@ -38,7 +38,8 @@ Execute queries, add schema types, obtain node info, etc.`, return setContextDB(cmd) }, } - cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", "ACP Identity") + cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", + "Hex formatted private key used to authenticate with ACP") cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID") return cmd } diff --git a/cli/collection.go b/cli/collection.go index 
cdf3d41f5a..ad7f54ec5c 100644 --- a/cli/collection.go +++ b/cli/collection.go @@ -80,7 +80,8 @@ func MakeCollectionCommand() *cobra.Command { }, } cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID") - cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", "ACP Identity") + cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", + "Hex formatted private key used to authenticate with ACP") cmd.PersistentFlags().StringVar(&name, "name", "", "Collection name") cmd.PersistentFlags().StringVar(&schemaRoot, "schema", "", "Collection schema Root") cmd.PersistentFlags().StringVar(&versionID, "version", "", "Collection version ID") diff --git a/cli/collection_create.go b/cli/collection_create.go index df7d8794b5..994911a14c 100644 --- a/cli/collection_create.go +++ b/cli/collection_create.go @@ -30,7 +30,8 @@ Example: create from string: defradb client collection create --name User '{ "name": "Bob" }' Example: create from string, with identity: - defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User '{ "name": "Bob" }' + defradb client collection create --name User '{ "name": "Bob" }' \ + -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: create multiple from string: defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]' diff --git a/cli/collection_delete.go b/cli/collection_delete.go index a9776d1985..c3abfcd80e 100644 --- a/cli/collection_delete.go +++ b/cli/collection_delete.go @@ -28,7 +28,8 @@ Example: delete by docID: defradb client collection delete --name User --docID bae-123 Example: delete by docID with identity: - defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User --docID bae-123 + defradb client collection delete --name User --docID bae-123 \ + -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: delete by filter: defradb client collection delete --name User --filter 
'{ "_gte": { "points": 100 } }' diff --git a/cli/collection_get.go b/cli/collection_get.go index 9ad5566f62..5a310a148c 100644 --- a/cli/collection_get.go +++ b/cli/collection_get.go @@ -27,7 +27,7 @@ Example: defradb client collection get --name User bae-123 Example to get a private document we must use an identity: - defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User bae-123 + defradb client collection get -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f --name User bae-123 `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cli/collection_list_doc_ids.go b/cli/collection_list_doc_ids.go index 168bb74a5a..bc6b298f32 100644 --- a/cli/collection_list_doc_ids.go +++ b/cli/collection_list_doc_ids.go @@ -26,7 +26,7 @@ Example: list all docID(s): defradb client collection docIDs --name User Example: list all docID(s), with an identity: - defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User + defradb client collection docIDs -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f --name User `, RunE: func(cmd *cobra.Command, args []string) error { col, ok := tryGetContextCollection(cmd) diff --git a/cli/collection_update.go b/cli/collection_update.go index 3e676edce9..fb7e352249 100644 --- a/cli/collection_update.go +++ b/cli/collection_update.go @@ -37,7 +37,7 @@ Example: update by docID: --docID bae-123 --updater '{ "verified": true }' Example: update private docID, with identity: - defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User \ + defradb client collection update -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f --name User \ --docID bae-123 --updater '{ "verified": true }' `, Args: cobra.RangeArgs(0, 1), diff --git a/cli/request.go b/cli/request.go index 3dba0c197d..796a5091c0 100644 --- a/cli/request.go +++ b/cli/request.go @@ -38,7 +38,7 @@ Do a 
query request from a file by using the '-f' flag. Example command: defradb client query -f request.graphql Do a query request from a file and with an identity. Example command: - defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql + defradb client query -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f -f request.graphql Or it can be sent via stdin by using the '-' special syntax. Example command: cat request.graphql | defradb client query - diff --git a/cli/utils.go b/cli/utils.go index 8c394d79eb..c82bba0990 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -12,11 +12,13 @@ package cli import ( "context" + "encoding/hex" "encoding/json" "os" "path/filepath" "syscall" + "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/spf13/cobra" "github.com/spf13/viper" "golang.org/x/term" @@ -139,12 +141,16 @@ func setContextTransaction(cmd *cobra.Command, txId uint64) error { } // setContextIdentity sets the identity for the current command context. -func setContextIdentity(cmd *cobra.Command, identity string) error { - // TODO-ACP: `https://github.com/sourcenetwork/defradb/issues/2358` do the validation here. - if identity == "" { +func setContextIdentity(cmd *cobra.Command, privateKeyHex string) error { + if privateKeyHex == "" { return nil } - ctx := db.SetContextIdentity(cmd.Context(), acpIdentity.New(identity)) + data, err := hex.DecodeString(privateKeyHex) + if err != nil { + return err + } + privKey := secp256k1.PrivKeyFromBytes(data) + ctx := db.SetContextIdentity(cmd.Context(), acpIdentity.FromPrivateKey(privKey)) cmd.SetContext(ctx) return nil } diff --git a/crypto/crypto.go b/crypto/crypto.go index 2e2bc333c1..acf374adb2 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -13,13 +13,18 @@ package crypto import ( "crypto/ed25519" "crypto/rand" + + "github.com/decred/dcrd/dcrec/secp256k1/v4" ) +// GenerateSecp256k1 generates a new secp256k1 private key. 
+func GenerateSecp256k1() (*secp256k1.PrivateKey, error) { + return secp256k1.GeneratePrivateKey() +} + // GenerateAES256 generates a new random AES-256 bit key. func GenerateAES256() ([]byte, error) { - data := make([]byte, 32) - _, err := rand.Read(data) - return data, err + return RandomBytes(32) } // GenerateEd25519 generates a new random Ed25519 private key. @@ -27,3 +32,10 @@ func GenerateEd25519() (ed25519.PrivateKey, error) { _, priv, err := ed25519.GenerateKey(rand.Reader) return priv, err } + +// RandomBytes returns a random slice of bytes of the given size. +func RandomBytes(size int) ([]byte, error) { + data := make([]byte, size) + _, err := rand.Read(data) + return data, err +} diff --git a/docs/website/references/cli/defradb_client.md b/docs/website/references/cli/defradb_client.md index 73c029153f..592f267e72 100644 --- a/docs/website/references/cli/defradb_client.md +++ b/docs/website/references/cli/defradb_client.md @@ -11,7 +11,7 @@ Execute queries, add schema types, obtain node info, etc. ``` -h, --help help for client - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --tx uint Transaction ID ``` diff --git a/docs/website/references/cli/defradb_client_acp.md b/docs/website/references/cli/defradb_client_acp.md index b8d92ee2bd..d80da76887 100644 --- a/docs/website/references/cli/defradb_client_acp.md +++ b/docs/website/references/cli/defradb_client_acp.md @@ -19,7 +19,7 @@ Learn more about [ACP](/acp/README.md) ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_acp_policy.md b/docs/website/references/cli/defradb_client_acp_policy.md index 3330a7e3dc..8374c92c15 100644 --- a/docs/website/references/cli/defradb_client_acp_policy.md +++ b/docs/website/references/cli/defradb_client_acp_policy.md @@ -15,7 +15,7 @@ Interact with the acp policy features of DefraDB instance ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_acp_policy_add.md b/docs/website/references/cli/defradb_client_acp_policy_add.md index 6b1e1865f6..ed2938e28e 100644 --- a/docs/website/references/cli/defradb_client_acp_policy_add.md +++ b/docs/website/references/cli/defradb_client_acp_policy_add.md @@ -15,7 +15,8 @@ Notes: - Learn more about [ACP & DPI Rules](/acp/README.md) Example: add from an argument string: - defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j ' + defradb client acp policy add -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f \ +' description: A Valid DefraDB Policy Interface actor: @@ -39,10 +40,12 @@ resources: ' Example: add from file: - defradb client acp policy add -i cosmos17r39df0hdcrgnmmw4mvu7qgk5nu888c7uvv37y -f policy.yml + defradb client acp policy add -f policy.yml \ + -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: 
add from file, verbose flags: - defradb client acp policy add --identity cosmos1kpw734v54g0t0d8tcye8ee5jc3gld0tcr2q473 --file policy.yml + defradb client acp policy add --file policy.yml \ + --identity 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: add from stdin: cat policy.yml | defradb client acp policy add - @@ -63,7 +66,7 @@ defradb client acp policy add [-i --identity] [policy] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_backup.md b/docs/website/references/cli/defradb_client_backup.md index a75f1edd43..11bdb78c23 100644 --- a/docs/website/references/cli/defradb_client_backup.md +++ b/docs/website/references/cli/defradb_client_backup.md @@ -16,7 +16,7 @@ Currently only supports JSON format. ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_backup_export.md b/docs/website/references/cli/defradb_client_backup_export.md index 5a4aa314cf..07a3e1bd85 100644 --- a/docs/website/references/cli/defradb_client_backup_export.md +++ b/docs/website/references/cli/defradb_client_backup_export.md @@ -30,7 +30,7 @@ defradb client backup export [-c --collections | -p --pretty | -f --format] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_collection.md b/docs/website/references/cli/defradb_client_collection.md index 24242d0966..bd732d0b07 100644 --- a/docs/website/references/cli/defradb_client_collection.md +++ b/docs/website/references/cli/defradb_client_collection.md @@ -11,7 +11,7 @@ Create, read, update, and delete documents within a collection. 
``` --get-inactive Get inactive collections as well as active -h, --help help for collection - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --name string Collection name --schema string Collection schema Root --tx uint Transaction ID diff --git a/docs/website/references/cli/defradb_client_collection_create.md b/docs/website/references/cli/defradb_client_collection_create.md index c44c99e6c0..425be82753 100644 --- a/docs/website/references/cli/defradb_client_collection_create.md +++ b/docs/website/references/cli/defradb_client_collection_create.md @@ -10,7 +10,8 @@ Example: create from string: defradb client collection create --name User '{ "name": "Bob" }' Example: create from string, with identity: - defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User '{ "name": "Bob" }' + defradb client collection create --name User '{ "name": "Bob" }' \ + -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: create multiple from string: defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]' @@ -37,7 +38,7 @@ defradb client collection create [-i --identity] [flags] ``` --get-inactive Get inactive collections as well as active - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_collection_delete.md b/docs/website/references/cli/defradb_client_collection_delete.md index 93fd056f76..af36d60e15 100644 --- a/docs/website/references/cli/defradb_client_collection_delete.md +++ b/docs/website/references/cli/defradb_client_collection_delete.md @@ -10,7 +10,8 @@ Example: delete by docID: defradb client collection delete --name User --docID bae-123 Example: delete by docID with identity: - defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User --docID bae-123 + defradb client collection delete --name User --docID bae-123 \ + -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f Example: delete by filter: defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }' @@ -32,7 +33,7 @@ defradb client collection delete [-i --identity] [--filter --docID [flags] ``` --get-inactive Get inactive collections as well as active - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_collection_patch.md b/docs/website/references/cli/defradb_client_collection_patch.md index 13596cf36b..d27306cc43 100644 --- a/docs/website/references/cli/defradb_client_collection_patch.md +++ b/docs/website/references/cli/defradb_client_collection_patch.md @@ -34,7 +34,7 @@ defradb client collection patch [patch] [flags] ``` --get-inactive Get inactive collections as well as active - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_collection_update.md b/docs/website/references/cli/defradb_client_collection_update.md index b23575bbe7..37ef8249a6 100644 --- a/docs/website/references/cli/defradb_client_collection_update.md +++ b/docs/website/references/cli/defradb_client_collection_update.md @@ -18,7 +18,7 @@ Example: update by docID: --docID bae-123 --updater '{ "verified": true }' Example: update private docID, with identity: - defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User \ + defradb client collection update -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f --name User \ --docID bae-123 --updater '{ "verified": true }' @@ -39,7 +39,7 @@ defradb client collection update [-i --identity] [--filter --docID --fields [-n - ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, 
--identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_index_drop.md b/docs/website/references/cli/defradb_client_index_drop.md index 40a2bf4067..1ccf845e02 100644 --- a/docs/website/references/cli/defradb_client_index_drop.md +++ b/docs/website/references/cli/defradb_client_index_drop.md @@ -24,7 +24,7 @@ defradb client index drop -c --collection -n --name [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_index_list.md b/docs/website/references/cli/defradb_client_index_list.md index dbae25798a..ad66d3524b 100644 --- a/docs/website/references/cli/defradb_client_index_list.md +++ b/docs/website/references/cli/defradb_client_index_list.md @@ -26,7 +26,7 @@ defradb client index list [-c --collection ] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p.md b/docs/website/references/cli/defradb_client_p2p.md index 4801449245..8896ed0b29 100644 --- a/docs/website/references/cli/defradb_client_p2p.md +++ b/docs/website/references/cli/defradb_client_p2p.md @@ -15,7 +15,7 @@ Interact with the DefraDB P2P system ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_collection.md b/docs/website/references/cli/defradb_client_p2p_collection.md index 0882eb93bc..d0eea4ee83 100644 --- a/docs/website/references/cli/defradb_client_p2p_collection.md +++ b/docs/website/references/cli/defradb_client_p2p_collection.md @@ -16,7 +16,7 @@ The selected collections synchronize their events on the pubsub network. ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_collection_add.md b/docs/website/references/cli/defradb_client_p2p_collection_add.md index 72258df075..c9e4d60a79 100644 --- a/docs/website/references/cli/defradb_client_p2p_collection_add.md +++ b/docs/website/references/cli/defradb_client_p2p_collection_add.md @@ -27,7 +27,7 @@ defradb client p2p collection add [collectionIDs] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_collection_getall.md b/docs/website/references/cli/defradb_client_p2p_collection_getall.md index 9e181e41d9..82f9cd9588 100644 --- a/docs/website/references/cli/defradb_client_p2p_collection_getall.md +++ b/docs/website/references/cli/defradb_client_p2p_collection_getall.md @@ -20,7 +20,7 @@ defradb client p2p collection getall [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_collection_remove.md b/docs/website/references/cli/defradb_client_p2p_collection_remove.md index c08ef717ed..6faffa5204 100644 --- a/docs/website/references/cli/defradb_client_p2p_collection_remove.md +++ b/docs/website/references/cli/defradb_client_p2p_collection_remove.md @@ -27,7 +27,7 @@ defradb client p2p collection remove [collectionIDs] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_info.md b/docs/website/references/cli/defradb_client_p2p_info.md index 55c55e24ab..22e6e44288 100644 --- a/docs/website/references/cli/defradb_client_p2p_info.md +++ b/docs/website/references/cli/defradb_client_p2p_info.md @@ -19,7 +19,7 @@ defradb client p2p info [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_replicator.md b/docs/website/references/cli/defradb_client_p2p_replicator.md index cfa84ea2f3..2ab9a6391a 100644 --- a/docs/website/references/cli/defradb_client_p2p_replicator.md +++ b/docs/website/references/cli/defradb_client_p2p_replicator.md @@ -16,7 +16,7 @@ A replicator replicates one or all collection(s) from one node to another. ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_replicator_delete.md b/docs/website/references/cli/defradb_client_p2p_replicator_delete.md index 0ee748feeb..9977d9811c 100644 --- a/docs/website/references/cli/defradb_client_p2p_replicator_delete.md +++ b/docs/website/references/cli/defradb_client_p2p_replicator_delete.md @@ -25,7 +25,7 @@ defradb client p2p replicator delete [-c, --collection] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_replicator_getall.md b/docs/website/references/cli/defradb_client_p2p_replicator_getall.md index 470f692bfc..0bca40baa0 100644 --- a/docs/website/references/cli/defradb_client_p2p_replicator_getall.md +++ b/docs/website/references/cli/defradb_client_p2p_replicator_getall.md @@ -24,7 +24,7 @@ defradb client p2p replicator getall [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_p2p_replicator_set.md b/docs/website/references/cli/defradb_client_p2p_replicator_set.md index 52917a7150..7f8623b96e 100644 --- a/docs/website/references/cli/defradb_client_p2p_replicator_set.md +++ b/docs/website/references/cli/defradb_client_p2p_replicator_set.md @@ -25,7 +25,7 @@ defradb client p2p replicator set [-c, --collection] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_query.md b/docs/website/references/cli/defradb_client_query.md index 2dcea07526..ec868456b1 100644 --- a/docs/website/references/cli/defradb_client_query.md +++ b/docs/website/references/cli/defradb_client_query.md @@ -13,7 +13,7 @@ Do a query request from a file by using the '-f' flag. Example command: defradb client query -f request.graphql Do a query request from a file and with an identity. Example command: - defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql + defradb client query -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f -f request.graphql Or it can be sent via stdin by using the '-' special syntax. Example command: cat request.graphql | defradb client query - @@ -37,7 +37,7 @@ defradb client query [-i --identity] [request] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema.md b/docs/website/references/cli/defradb_client_schema.md index c69bdaee8f..4ad3289b35 100644 --- a/docs/website/references/cli/defradb_client_schema.md +++ b/docs/website/references/cli/defradb_client_schema.md @@ -15,7 +15,7 @@ Make changes, updates, or look for existing schema types. 
### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_add.md b/docs/website/references/cli/defradb_client_schema_add.md index 9e70bb1f17..c10496112e 100644 --- a/docs/website/references/cli/defradb_client_schema_add.md +++ b/docs/website/references/cli/defradb_client_schema_add.md @@ -36,7 +36,7 @@ defradb client schema add [schema] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_describe.md b/docs/website/references/cli/defradb_client_schema_describe.md index 06cb28479d..1a0abbc964 100644 --- a/docs/website/references/cli/defradb_client_schema_describe.md +++ b/docs/website/references/cli/defradb_client_schema_describe.md @@ -35,7 +35,7 @@ defradb client schema describe [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_migration.md b/docs/website/references/cli/defradb_client_schema_migration.md index a7d492d535..3660095f0f 100644 --- a/docs/website/references/cli/defradb_client_schema_migration.md +++ b/docs/website/references/cli/defradb_client_schema_migration.md @@ -15,7 +15,7 @@ Make set or look for existing schema migrations on a DefraDB node. ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_migration_down.md b/docs/website/references/cli/defradb_client_schema_migration_down.md index c457a61992..82f2168b04 100644 --- a/docs/website/references/cli/defradb_client_schema_migration_down.md +++ b/docs/website/references/cli/defradb_client_schema_migration_down.md @@ -32,7 +32,7 @@ defradb client schema migration down --collection [fl ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_migration_reload.md b/docs/website/references/cli/defradb_client_schema_migration_reload.md index e4d1b77cdf..c74d4987b7 100644 --- a/docs/website/references/cli/defradb_client_schema_migration_reload.md +++ b/docs/website/references/cli/defradb_client_schema_migration_reload.md @@ -19,7 +19,7 @@ defradb client schema migration reload [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_migration_set-registry.md b/docs/website/references/cli/defradb_client_schema_migration_set-registry.md index f920a37216..f554ac5f48 100644 --- a/docs/website/references/cli/defradb_client_schema_migration_set-registry.md +++ b/docs/website/references/cli/defradb_client_schema_migration_set-registry.md @@ -25,7 +25,7 @@ defradb client schema migration set-registry [collectionID] [cfg] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_migration_set.md b/docs/website/references/cli/defradb_client_schema_migration_set.md index 66062fe4e8..fde006cb05 100644 --- a/docs/website/references/cli/defradb_client_schema_migration_set.md +++ b/docs/website/references/cli/defradb_client_schema_migration_set.md @@ -32,7 +32,7 @@ defradb client schema migration set [src] [dst] [cfg] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_migration_up.md b/docs/website/references/cli/defradb_client_schema_migration_up.md index 5174bf5ce0..fb6352c2f6 100644 --- a/docs/website/references/cli/defradb_client_schema_migration_up.md +++ b/docs/website/references/cli/defradb_client_schema_migration_up.md @@ -32,7 +32,7 @@ defradb client schema migration up --collection [flag ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_patch.md b/docs/website/references/cli/defradb_client_schema_patch.md index 6c884d0a0f..a47ffc633c 100644 --- a/docs/website/references/cli/defradb_client_schema_patch.md +++ b/docs/website/references/cli/defradb_client_schema_patch.md @@ -35,7 +35,7 @@ defradb client schema patch [schema] [migration] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_schema_set-active.md b/docs/website/references/cli/defradb_client_schema_set-active.md index 5e9daf911b..efc4dee955 100644 --- a/docs/website/references/cli/defradb_client_schema_set-active.md +++ b/docs/website/references/cli/defradb_client_schema_set-active.md @@ -20,7 +20,7 @@ defradb client schema set-active [versionID] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_tx.md b/docs/website/references/cli/defradb_client_tx.md index e9ffb95d3b..acc23bba4a 100644 --- a/docs/website/references/cli/defradb_client_tx.md +++ b/docs/website/references/cli/defradb_client_tx.md @@ -15,7 +15,7 @@ Create, commit, and discard DefraDB transactions ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_tx_commit.md b/docs/website/references/cli/defradb_client_tx_commit.md index d641d03691..7775cb5287 100644 --- a/docs/website/references/cli/defradb_client_tx_commit.md +++ b/docs/website/references/cli/defradb_client_tx_commit.md @@ -19,7 +19,7 @@ defradb client tx commit [id] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_tx_create.md b/docs/website/references/cli/defradb_client_tx_create.md index 633c82b6e4..2c3c7e5278 100644 --- a/docs/website/references/cli/defradb_client_tx_create.md +++ b/docs/website/references/cli/defradb_client_tx_create.md @@ -21,7 +21,7 @@ defradb client tx create [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_tx_discard.md b/docs/website/references/cli/defradb_client_tx_discard.md index a61bbc5fa1..dfbee4733c 100644 --- a/docs/website/references/cli/defradb_client_tx_discard.md +++ b/docs/website/references/cli/defradb_client_tx_discard.md @@ -19,7 +19,7 @@ defradb client tx discard [id] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_view.md b/docs/website/references/cli/defradb_client_view.md index 15dd2034cb..498b11a59e 100644 --- a/docs/website/references/cli/defradb_client_view.md +++ b/docs/website/references/cli/defradb_client_view.md @@ -15,7 +15,7 @@ Manage (add) views withing a running DefraDB instance ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/docs/website/references/cli/defradb_client_view_add.md b/docs/website/references/cli/defradb_client_view_add.md index ee5e94f6df..e50c16d7f6 100644 --- a/docs/website/references/cli/defradb_client_view_add.md +++ b/docs/website/references/cli/defradb_client_view_add.md @@ -25,7 +25,7 @@ defradb client view add [query] [sdl] [transform] [flags] ### Options inherited from parent commands ``` - -i, --identity string ACP Identity + -i, --identity string Hex formatted private key used to authenticate with ACP --keyring-backend string Keyring backend to use. 
Options are file or system (default "file") --keyring-namespace string Service name to use when using the system backend (default "defradb") --keyring-path string Path to store encrypted keys when using the file backend (default "keys") diff --git a/examples/schema/permissioned/users.graphql b/examples/schema/permissioned/users.graphql index 771e6da2c9..69923d2351 100644 --- a/examples/schema/permissioned/users.graphql +++ b/examples/schema/permissioned/users.graphql @@ -10,7 +10,7 @@ # The policy must be a valid DPI, learn more about the DefraDB Policy Interface [DPI](/acp/README.md) type Users @policy( - id: "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c", + id: "50d354a91ab1b8fce8a0ae4693de7616fb1d82cfc540f25cfbe11eb0195a5765", resource: "users" ) { name: String diff --git a/go.mod b/go.mod index 7933128def..6b166e3369 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,9 @@ go 1.21.3 require ( github.com/bits-and-blooms/bitset v1.13.0 github.com/bxcodec/faker v2.0.1+incompatible + github.com/cosmos/cosmos-sdk v0.50.6 github.com/cosmos/gogoproto v1.4.12 + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/evanphx/json-patch/v5 v5.9.0 github.com/fxamacker/cbor/v2 v2.6.0 github.com/getkin/kin-openapi v0.124.0 @@ -51,7 +53,7 @@ require ( go.opentelemetry.io/otel/metric v1.26.0 go.opentelemetry.io/otel/sdk/metric v1.26.0 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 golang.org/x/term v0.20.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.1 @@ -59,15 +61,15 @@ require ( require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.31.0-20230802163732-1c33ebd9ecfa.1 // indirect - cosmossdk.io/api v0.7.3 // indirect + cosmossdk.io/api v0.7.4 // indirect cosmossdk.io/collections v0.4.0 // indirect cosmossdk.io/core v0.11.0 // indirect cosmossdk.io/depinject v1.0.0-alpha.4 // indirect cosmossdk.io/errors v1.0.1 // indirect cosmossdk.io/log 
v1.3.1 // indirect cosmossdk.io/math v1.3.0 // indirect - cosmossdk.io/store v1.0.2 // indirect - cosmossdk.io/x/tx v0.13.1 // indirect + cosmossdk.io/store v1.1.0 // indirect + cosmossdk.io/x/tx v0.13.2 // indirect filippo.io/edwards25519 v1.0.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect @@ -86,7 +88,7 @@ require ( github.com/bytecodealliance/wasmtime-go/v15 v15.0.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v1.1.0 // indirect @@ -98,12 +100,11 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-db v1.0.2 // indirect - github.com/cosmos/cosmos-proto v1.0.0-beta.4 // indirect - github.com/cosmos/cosmos-sdk v0.50.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect github.com/cosmos/gorocksdb v1.2.0 // indirect - github.com/cosmos/iavl v1.0.1 // indirect + github.com/cosmos/iavl v1.1.2 // indirect github.com/cosmos/ics23/go v0.10.0 // indirect github.com/cosmos/ledger-cosmos-go v0.13.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect @@ -111,7 +112,6 @@ require ( github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // 
indirect github.com/dgraph-io/ristretto v0.1.1 // indirect @@ -162,7 +162,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-metrics v0.5.2 // indirect + github.com/hashicorp/go-metrics v0.5.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect @@ -216,7 +216,7 @@ require ( github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect - github.com/linxGnu/grocksdb v1.8.12 // indirect + github.com/linxGnu/grocksdb v1.8.14 // indirect github.com/lmittmann/tint v1.0.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -247,16 +247,16 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect - github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc // indirect + github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 // indirect github.com/piprate/json-gold v0.5.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.6.0 // indirect - github.com/prometheus/common v0.47.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.52.2 // indirect + github.com/prometheus/procfs v0.13.0 // indirect 
github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/quic-go v0.42.0 // indirect github.com/quic-go/webtransport-go v0.6.0 // indirect @@ -300,21 +300,21 @@ require ( go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.6.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.18.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/tools v0.20.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.3.0 // indirect nhooyr.io/websocket v1.8.7 // indirect pgregory.net/rapid v1.1.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index b46681e784..508d0fa72f 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cosmossdk.io/api v0.7.3 
h1:V815i8YOwOAQa1rLCsSMjVG5Gnzs02JLq+l7ks8s1jk= -cosmossdk.io/api v0.7.3/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= +cosmossdk.io/api v0.7.4 h1:sPo8wKwCty1lht8kgL3J7YL1voJywP3YWuA5JKkBz30= +cosmossdk.io/api v0.7.4/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= cosmossdk.io/core v0.11.0 h1:vtIafqUi+1ZNAE/oxLOQQ7Oek2n4S48SWLG8h/+wdbo= @@ -18,10 +18,10 @@ cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= -cosmossdk.io/store v1.0.2 h1:lSg5BTvJBHUDwswNNyeh4K/CbqiHER73VU4nDNb8uk0= -cosmossdk.io/store v1.0.2/go.mod h1:EFtENTqVTuWwitGW1VwaBct+yDagk7oG/axBMPH+FXs= -cosmossdk.io/x/tx v0.13.1 h1:Mg+EMp67Pz+NukbJqYxuo8uRp7N/a9uR+oVS9pONtj8= -cosmossdk.io/x/tx v0.13.1/go.mod h1:CBCU6fsRVz23QGFIQBb1DNX2DztJCf3jWyEkHY2nJQ0= +cosmossdk.io/store v1.1.0 h1:LnKwgYMc9BInn9PhpTFEQVbL9UK475G2H911CGGnWHk= +cosmossdk.io/store v1.1.0/go.mod h1:oZfW/4Fc/zYqu3JmQcQdUJ3fqu5vnYTn3LZFFy8P8ng= +cosmossdk.io/x/tx v0.13.2 h1:Kh90UH30bhnnUdJH+CmWLyaH8IKdY6BBGY3EkdOk82o= +cosmossdk.io/x/tx v0.13.2/go.mod h1:yhPokDCfXVIuAtyp49IFlWB5YAXUgD7Zek+ZHwsHzvU= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= @@ -135,8 +135,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash v1.1.0 
h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= @@ -192,10 +192,10 @@ github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs= github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= -github.com/cosmos/cosmos-proto v1.0.0-beta.4 h1:aEL7tU/rLOmxZQ9z4i7mzxcLbSCY48OdY7lIWTLG7oU= -github.com/cosmos/cosmos-proto v1.0.0-beta.4/go.mod h1:oeB+FyVzG3XrQJbJng0EnV8Vljfk9XvTIpGILNU/9Co= -github.com/cosmos/cosmos-sdk v0.50.5 h1:MOEi+DKYgW67YaPgB+Pf+nHbD3V9S/ayitRKJYLfGIA= -github.com/cosmos/cosmos-sdk v0.50.5/go.mod h1:oV/k6GJgXV9QPoM2fsYDPPsyPBgQbdotv532O6Mz1OQ= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.50.6 h1:efR3MsvMHX5sxS3be+hOobGk87IzlZbSpsI2x/Vw3hk= +github.com/cosmos/cosmos-sdk v0.50.6/go.mod h1:lVkRY6cdMJ0fG3gp8y4hFrsKZqF4z7y0M2UXFb9Yt40= 
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -205,8 +205,8 @@ github.com/cosmos/gogoproto v1.4.12 h1:vB6Lbe/rtnYGjQuFxkPiPYiCybqFT8QvLipDZP8Jp github.com/cosmos/gogoproto v1.4.12/go.mod h1:LnZob1bXRdUoqMMtwYlcR3wjiElmlC+FkjaZRv1/eLY= github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= -github.com/cosmos/iavl v1.0.1 h1:D+mYbcRO2wptYzOM1Hxl9cpmmHU1ZEt9T2Wv5nZTeUw= -github.com/cosmos/iavl v1.0.1/go.mod h1:8xIUkgVvwvVrBu81scdPty+/Dx9GqwHnAvXz4cwF7RY= +github.com/cosmos/iavl v1.1.2 h1:zL9FK7C4L/P4IF1Dm5fIwz0WXCnn7Bp1M2FxH0ayM7Y= +github.com/cosmos/iavl v1.1.2/go.mod h1:jLeUvm6bGT1YutCaL2fIar/8vGUE8cPZvh/gXEWDaDM= github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= github.com/cosmos/ledger-cosmos-go v0.13.3 h1:7ehuBGuyIytsXbd4MP43mLeoN2LTOEnk5nvue4rK+yM= @@ -233,8 +233,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod 
h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= @@ -517,8 +517,8 @@ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.2 h1:ErEYO2f//CjKsUDw4SmLzelsK6L3ZmOAR/4P9iS7ruY= -github.com/hashicorp/go-metrics v0.5.2/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= +github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -752,8 +752,8 @@ github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.12 h1:1/pCztQUOa3BX/1gR3jSZDoaKFpeHFvQ1XrqZpSvZVo= -github.com/linxGnu/grocksdb v1.8.12/go.mod 
h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= +github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= +github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= github.com/lmittmann/tint v1.0.4 h1:LeYihpJ9hyGvE0w+K2okPTGUdVLfng1+nDNVR4vWISc= github.com/lmittmann/tint v1.0.4/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= @@ -930,8 +930,8 @@ github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9 github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc h1:8bQZVK1X6BJR/6nYUPxQEP+ReTsceJTKizeuwjWOPUA= -github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 h1:jik8PHtAIsPlCRJjJzl4udgEf7hawInF9texMeO2jrU= +github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -959,16 +959,16 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -976,8 +976,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= 
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= -github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= +github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -985,8 +985,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM= @@ -1268,13 +1268,13 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh 
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 h1:985EYyeCOxTpcgOTJpflJUwOeEz0CQOdPt73OzpE9F8= +golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1293,8 +1293,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod 
v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1331,8 +1331,8 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1349,8 +1349,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1434,8 +1434,9 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1466,8 +1467,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1501,8 +1502,8 @@ google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUE google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod 
h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1583,8 +1584,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= +lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= diff --git a/http/auth.go b/http/auth.go new file mode 100644 index 0000000000..994e9a4220 --- /dev/null +++ b/http/auth.go @@ -0,0 +1,115 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package http + +import ( + "encoding/hex" + "net/http" + "strings" + "time" + + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jws" + "github.com/lestrrat-go/jwx/v2/jwt" + "github.com/sourcenetwork/immutable" + + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" + "github.com/sourcenetwork/defradb/internal/db" +) + +const ( + // authHeaderName is the name of the authorization header. + // This header should contain an ACP identity. + authHeaderName = "Authorization" + // authSchemaPrefix is the prefix added to the + // authorization header value. + authSchemaPrefix = "Bearer " + // authTokenExpiration is the default expiration time for auth tokens. + authTokenExpiration = time.Minute * 15 +) + +var authTokenSignatureScheme = jwa.ES256K + +// buildAuthToken returns a new jwt auth token with the subject and audience set +// to the given values. Default expiration and not before values will also be set. +func buildAuthToken(identity acpIdentity.Identity, audience string) (jwt.Token, error) { + if identity.PublicKey == nil { + return nil, ErrMissingIdentityPublicKey + } + subject := hex.EncodeToString(identity.PublicKey.SerializeCompressed()) + now := time.Now() + return jwt.NewBuilder(). + Subject(subject). + Audience([]string{audience}). + Expiration(now.Add(authTokenExpiration)). + NotBefore(now). + Build() +} + +// signAuthToken returns a signed jwt auth token that can be used to authenticate the +// actor identified by the given identity with a defraDB node identified by the given audience. 
+func signAuthToken(identity acpIdentity.Identity, token jwt.Token) ([]byte, error) { + if identity.PrivateKey == nil { + return nil, ErrMissingIdentityPrivateKey + } + return jwt.Sign(token, jwt.WithKey(authTokenSignatureScheme, identity.PrivateKey.ToECDSA())) +} + +// buildAndSignAuthToken returns a signed jwt auth token that can be used to authenticate the +// actor identified by the given identity with a defraDB node identified by the given audience. +func buildAndSignAuthToken(identity acpIdentity.Identity, audience string) ([]byte, error) { + token, err := buildAuthToken(identity, audience) + if err != nil { + return nil, err + } + return signAuthToken(identity, token) +} + +// verifyAuthToken verifies that the jwt auth token is valid and that the signature +// matches the identity of the subject. +func verifyAuthToken(data []byte, audience string) (immutable.Option[acpIdentity.Identity], error) { + token, err := jwt.Parse(data, jwt.WithVerify(false), jwt.WithAudience(audience)) + if err != nil { + return immutable.None[acpIdentity.Identity](), err + } + subject, err := hex.DecodeString(token.Subject()) + if err != nil { + return immutable.None[acpIdentity.Identity](), err + } + pubKey, err := secp256k1.ParsePubKey(subject) + if err != nil { + return immutable.None[acpIdentity.Identity](), err + } + _, err = jws.Verify(data, jws.WithKey(authTokenSignatureScheme, pubKey.ToECDSA())) + if err != nil { + return immutable.None[acpIdentity.Identity](), err + } + return acpIdentity.FromPublicKey(pubKey), nil +} + +// AuthMiddleware authenticates an actor and sets their identity for all subsequent actions. 
+func AuthMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + token := strings.TrimPrefix(req.Header.Get(authHeaderName), authSchemaPrefix) + if token == "" { + next.ServeHTTP(rw, req) + return + } + identity, err := verifyAuthToken([]byte(token), strings.ToLower(req.Host)) + if err != nil { + http.Error(rw, "forbidden", http.StatusForbidden) + return + } + ctx := db.SetContextIdentity(req.Context(), identity) + next.ServeHTTP(rw, req.WithContext(ctx)) + }) +} diff --git a/http/auth_test.go b/http/auth_test.go new file mode 100644 index 0000000000..0ea9705efd --- /dev/null +++ b/http/auth_test.go @@ -0,0 +1,140 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package http + +import ( + "encoding/hex" + "testing" + "time" + + "github.com/lestrrat-go/jwx/v2/jwt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" + "github.com/sourcenetwork/defradb/crypto" +) + +func TestBuildAuthToken(t *testing.T) { + privKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + identity := acpIdentity.FromPrivateKey(privKey) + token, err := buildAuthToken(identity.Value(), "abc123") + require.NoError(t, err) + + subject := hex.EncodeToString(privKey.PubKey().SerializeCompressed()) + assert.Equal(t, subject, token.Subject()) + + assert.True(t, token.NotBefore().Before(time.Now())) + assert.True(t, token.Expiration().After(time.Now())) + assert.Equal(t, []string{"abc123"}, token.Audience()) +} + +func TestSignAuthTokenErrorsWithPublicIdentity(t *testing.T) { + privKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + identity := acpIdentity.FromPublicKey(privKey.PubKey()) + token, err := buildAuthToken(identity.Value(), "abc123") + require.NoError(t, err) + + _, err = signAuthToken(identity.Value(), token) + assert.ErrorIs(t, err, ErrMissingIdentityPrivateKey) +} + +func TestVerifyAuthToken(t *testing.T) { + privKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + identity := acpIdentity.FromPrivateKey(privKey) + token, err := buildAndSignAuthToken(identity.Value(), "abc123") + require.NoError(t, err) + + actual, err := verifyAuthToken(token, "abc123") + require.NoError(t, err) + + expected := acpIdentity.FromPublicKey(privKey.PubKey()) + assert.Equal(t, expected.Value().Address, actual.Value().Address) +} + +func TestVerifyAuthTokenErrorsWithNonMatchingAudience(t *testing.T) { + privKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + identity := acpIdentity.FromPrivateKey(privKey) + token, err := buildAndSignAuthToken(identity.Value(), "valid") + require.NoError(t, err) + + _, err = 
verifyAuthToken(token, "invalid") + assert.Error(t, err) +} + +func TestVerifyAuthTokenErrorsWithWrongPublicKey(t *testing.T) { + privKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + otherKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + identity := acpIdentity.FromPrivateKey(privKey) + token, err := buildAuthToken(identity.Value(), "123abc") + require.NoError(t, err) + + // override subject + subject := hex.EncodeToString(otherKey.PubKey().SerializeCompressed()) + err = token.Set(jwt.SubjectKey, subject) + require.NoError(t, err) + + data, err := signAuthToken(identity.Value(), token) + require.NoError(t, err) + + _, err = verifyAuthToken(data, "123abc") + assert.Error(t, err) +} + +func TestVerifyAuthTokenErrorsWithExpired(t *testing.T) { + privKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + identity := acpIdentity.FromPrivateKey(privKey) + token, err := buildAuthToken(identity.Value(), "123abc") + require.NoError(t, err) + + // override expiration + err = token.Set(jwt.ExpirationKey, time.Now().Add(-15*time.Minute)) + require.NoError(t, err) + + data, err := signAuthToken(identity.Value(), token) + require.NoError(t, err) + + _, err = verifyAuthToken(data, "123abc") + assert.Error(t, err) +} + +func TestVerifyAuthTokenErrorsWithNotBefore(t *testing.T) { + privKey, err := crypto.GenerateSecp256k1() + require.NoError(t, err) + + identity := acpIdentity.FromPrivateKey(privKey) + token, err := buildAuthToken(identity.Value(), "123abc") + require.NoError(t, err) + + // override not before + err = token.Set(jwt.NotBeforeKey, time.Now().Add(15*time.Minute)) + require.NoError(t, err) + + data, err := signAuthToken(identity.Value(), token) + require.NoError(t, err) + + _, err = verifyAuthToken(data, "123abc") + assert.Error(t, err) +} diff --git a/http/client.go b/http/client.go index 7616653f71..49982bad2a 100644 --- a/http/client.go +++ b/http/client.go @@ -354,7 +354,11 @@ func (c *Client) ExecRequest( 
result.GQL.Errors = []error{err} return result } - c.http.setDefaultHeaders(req) + err = c.http.setDefaultHeaders(req) + if err != nil { + result.GQL.Errors = []error{err} + return result + } res, err := c.http.client.Do(req) if err != nil { diff --git a/http/client_collection.go b/http/client_collection.go index 68b76c6a9e..59b2cf79b6 100644 --- a/http/client_collection.go +++ b/http/client_collection.go @@ -311,7 +311,10 @@ func (c *Collection) GetAllDocIDs( return nil, err } - c.http.setDefaultHeaders(req) + err = c.http.setDefaultHeaders(req) + if err != nil { + return nil, err + } res, err := c.http.client.Do(req) if err != nil { diff --git a/http/errors.go b/http/errors.go index ef25d06421..d4a72df516 100644 --- a/http/errors.go +++ b/http/errors.go @@ -27,15 +27,17 @@ const ( // This list is incomplete. Undefined errors may also be returned. // Errors returned from this package may be tested against these errors with errors.Is. var ( - ErrNoListener = errors.New("cannot serve with no listener") - ErrNoEmail = errors.New("email address must be specified for tls with autocert") - ErrInvalidRequestBody = errors.New("invalid request body") - ErrStreamingNotSupported = errors.New("streaming not supported") - ErrMigrationNotFound = errors.New("migration not found") - ErrMissingRequest = errors.New("missing request") - ErrInvalidTransactionId = errors.New("invalid transaction id") - ErrP2PDisabled = errors.New("p2p network is disabled") - ErrMethodIsNotImplemented = errors.New(errMethodIsNotImplemented) + ErrNoListener = errors.New("cannot serve with no listener") + ErrNoEmail = errors.New("email address must be specified for tls with autocert") + ErrInvalidRequestBody = errors.New("invalid request body") + ErrStreamingNotSupported = errors.New("streaming not supported") + ErrMigrationNotFound = errors.New("migration not found") + ErrMissingRequest = errors.New("missing request") + ErrInvalidTransactionId = errors.New("invalid transaction id") + ErrP2PDisabled = 
errors.New("p2p network is disabled") + ErrMethodIsNotImplemented = errors.New(errMethodIsNotImplemented) + ErrMissingIdentityPrivateKey = errors.New("identity has no private key") + ErrMissingIdentityPublicKey = errors.New("identity has no public key") ) type errorResponse struct { diff --git a/http/handler.go b/http/handler.go index 80afcc5a3e..3ec33d9b2a 100644 --- a/http/handler.go +++ b/http/handler.go @@ -75,13 +75,12 @@ func NewHandler(db client.DB) (*Handler, error) { return nil, err } txs := &sync.Map{} - mux := chi.NewMux() mux.Route("/api/"+Version, func(r chi.Router) { r.Use( ApiMiddleware(db, txs), TransactionMiddleware, - IdentityMiddleware, + AuthMiddleware, ) r.Handle("/*", router) }) @@ -89,7 +88,6 @@ func NewHandler(db client.DB) (*Handler, error) { responseJSON(rw, http.StatusOK, router.OpenAPI()) }) mux.Handle("/*", playgroundHandler) - return &Handler{ db: db, mux: mux, diff --git a/http/http_client.go b/http/http_client.go index 972e96e94d..b3dfcb9e3f 100644 --- a/http/http_client.go +++ b/http/http_client.go @@ -12,6 +12,7 @@ package http import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -34,14 +35,13 @@ func newHttpClient(rawURL string) (*httpClient, error) { if err != nil { return nil, err } - client := httpClient{ + return &httpClient{ client: http.DefaultClient, baseURL: baseURL.JoinPath("/api/v0"), - } - return &client, nil + }, nil } -func (c *httpClient) setDefaultHeaders(req *http.Request) { +func (c *httpClient) setDefaultHeaders(req *http.Request) error { req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") @@ -50,14 +50,25 @@ func (c *httpClient) setDefaultHeaders(req *http.Request) { req.Header.Set(txHeaderName, fmt.Sprintf("%d", txn.ID())) } id := db.GetContextIdentity(req.Context()) - if id.HasValue() { - req.Header.Add(authHeaderName, authSchemaPrefix+id.Value().String()) + if !id.HasValue() { + return nil + } + token, err := buildAndSignAuthToken(id.Value(), 
strings.ToLower(c.baseURL.Host)) + if errors.Is(err, ErrMissingIdentityPrivateKey) { + return nil + } + if err != nil { + return err } + req.Header.Set(authHeaderName, fmt.Sprintf("%s%s", authSchemaPrefix, token)) + return nil } func (c *httpClient) request(req *http.Request) ([]byte, error) { - c.setDefaultHeaders(req) - + err := c.setDefaultHeaders(req) + if err != nil { + return nil, err + } res, err := c.client.Do(req) if err != nil { return nil, err diff --git a/http/middleware.go b/http/middleware.go index d37a7d3962..d02c3d6470 100644 --- a/http/middleware.go +++ b/http/middleware.go @@ -21,7 +21,6 @@ import ( "github.com/go-chi/cors" "golang.org/x/exp/slices" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/db" @@ -31,12 +30,6 @@ const ( // txHeaderName is the name of the transaction header. // This header should contain a valid transaction id. txHeaderName = "x-defradb-tx" - // authHeaderName is the name of the authorization header. - // This header should contain an ACP identity. - authHeaderName = "Authorization" - // Using Basic right now, but this will soon change to 'Bearer' as acp authentication - // gets implemented: https://github.com/sourcenetwork/defradb/issues/2017 - authSchemaPrefix = "Basic " ) type contextKey string @@ -123,23 +116,3 @@ func CollectionMiddleware(next http.Handler) http.Handler { next.ServeHTTP(rw, req.WithContext(ctx)) }) } - -func IdentityMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - authHeader := req.Header.Get(authHeaderName) - if authHeader == "" { - next.ServeHTTP(rw, req) - return - } - - identity := strings.TrimPrefix(authHeader, authSchemaPrefix) - // If expected schema prefix was not found, or empty, then assume no identity. 
- if identity == authHeader || identity == "" { - next.ServeHTTP(rw, req) - return - } - - ctx := db.SetContextIdentity(req.Context(), acpIdentity.New(identity)) - next.ServeHTTP(rw, req.WithContext(ctx)) - }) -} diff --git a/http/openapi.go b/http/openapi.go index 698a88796e..f6816376ae 100644 --- a/http/openapi.go +++ b/http/openapi.go @@ -87,6 +87,13 @@ func NewOpenAPISpec() (*openapi3.T, error) { Value: txnHeaderParam, } + // add authentication schemes + securitySchemes := openapi3.SecuritySchemes{ + "bearerToken": &openapi3.SecuritySchemeRef{ + Value: openapi3.NewJWTSecurityScheme(), + }, + } + return &openapi3.T{ OpenAPI: "3.0.3", Info: &openapi3.Info{ @@ -105,9 +112,10 @@ func NewOpenAPISpec() (*openapi3.T, error) { URL: "https://docs.source.network", }, Components: &openapi3.Components{ - Schemas: schemas, - Responses: responses, - Parameters: parameters, + Schemas: schemas, + Responses: responses, + Parameters: parameters, + SecuritySchemes: securitySchemes, }, Tags: openapi3.Tags{ &openapi3.Tag{ diff --git a/http/utils.go b/http/utils.go index a67afef476..81aeac1b05 100644 --- a/http/utils.go +++ b/http/utils.go @@ -28,10 +28,16 @@ func requestJSON(req *http.Request, out any) error { return json.Unmarshal(data, out) } -func responseJSON(rw http.ResponseWriter, status int, out any) { +// responseJSON writes a json response with the given status and data +// to the response writer. Any errors encountered will be logged. 
+func responseJSON(rw http.ResponseWriter, status int, data any) { rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(status) - json.NewEncoder(rw).Encode(out) //nolint:errcheck + + err := json.NewEncoder(rw).Encode(data) + if err != nil { + log.ErrorE("failed to write response", err) + } } func parseError(msg any) error { diff --git a/internal/db/db.go b/internal/db/db.go index af40627e2b..4379928c82 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -168,12 +168,12 @@ func (db *db) AddPolicy( return client.AddPolicyResult{}, client.ErrPolicyAddFailureNoACP } identity := GetContextIdentity(ctx) + policyID, err := db.acp.Value().AddPolicy( ctx, - identity.Value().String(), + identity.Value().Address, policy, ) - if err != nil { return client.AddPolicyResult{}, err } diff --git a/internal/db/fetcher/mocks/fetcher.go b/internal/db/fetcher/mocks/fetcher.go index 99fee65c47..b60385d110 100644 --- a/internal/db/fetcher/mocks/fetcher.go +++ b/internal/db/fetcher/mocks/fetcher.go @@ -3,8 +3,8 @@ package mocks import ( - client "github.com/sourcenetwork/defradb/client" acp "github.com/sourcenetwork/defradb/acp" + client "github.com/sourcenetwork/defradb/client" context "context" diff --git a/internal/db/permission/check.go b/internal/db/permission/check.go index 36dce10489..4d5e9f5ed0 100644 --- a/internal/db/permission/check.go +++ b/internal/db/permission/check.go @@ -78,7 +78,7 @@ func CheckAccessOfDocOnCollectionWithACP( hasAccess, err := acpSystem.CheckDocAccess( ctx, permission, - identity.Value().String(), + identity.Value().Address, policyID, resourceName, docID, diff --git a/internal/db/permission/register.go b/internal/db/permission/register.go index a46e5eef34..bbe9a2e713 100644 --- a/internal/db/permission/register.go +++ b/internal/db/permission/register.go @@ -40,7 +40,7 @@ func RegisterDocOnCollectionWithACP( if policyID, resourceName, hasPolicy := isPermissioned(collection); hasPolicy && identity.HasValue() { return 
acpSystem.RegisterDocObject( ctx, - identity.Value().String(), + identity.Value().Address, policyID, resourceName, docID, diff --git a/net/peer_test.go b/net/peer_test.go index fdd1feb583..e708ff0708 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -12,10 +12,12 @@ package net import ( "context" + "encoding/hex" "fmt" "testing" "time" + "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" libp2p "github.com/libp2p/go-libp2p" @@ -371,11 +373,17 @@ func TestSetReplicatorWithACollectionSpecifiedThatHasPolicy_ReturnError(t *testi types: - actor ` - ctx = db.SetContextIdentity(ctx, acpIdentity.New("cosmos1zzg43wdrhmmk89z3pmejwete2kkd4a3vn7w969")) + + privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") + require.NoError(t, err) + privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) + identity := acpIdentity.FromPrivateKey(privKey) + + ctx = db.SetContextIdentity(ctx, identity) policyResult, err := d.AddPolicy(ctx, policy) policyID := policyResult.PolicyID require.NoError(t, err) - require.Equal(t, "fc3a0a39c73949c70a79e02b8d928028e9cbcc772ba801463a6acdcf2f256cd4", policyID) + require.Equal(t, "7bef56a54eae563eafdc48c57cf37075351498ebb5a200f59cf9b8c6f8149606", policyID) schema := fmt.Sprintf(` type User @policy(id: "%s", resource: "user") { @@ -422,11 +430,17 @@ func TestSetReplicatorWithSomeCollectionThatHasPolicyUsingAllCollectionsByDefaul types: - actor ` - ctx = db.SetContextIdentity(ctx, acpIdentity.New("cosmos1zzg43wdrhmmk89z3pmejwete2kkd4a3vn7w969")) + + privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") + require.NoError(t, err) + privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) + identity := acpIdentity.FromPrivateKey(privKey) + + ctx = db.SetContextIdentity(ctx, identity) policyResult, err := d.AddPolicy(ctx, policy) policyID := policyResult.PolicyID require.NoError(t, err) - require.Equal(t, 
"fc3a0a39c73949c70a79e02b8d928028e9cbcc772ba801463a6acdcf2f256cd4", policyID) + require.Equal(t, "7bef56a54eae563eafdc48c57cf37075351498ebb5a200f59cf9b8c6f8149606", policyID) schema := fmt.Sprintf(` type User @policy(id: "%s", resource: "user") { @@ -781,11 +795,17 @@ func TestAddP2PCollectionsWithPermissionedCollection_Error(t *testing.T) { types: - actor ` - ctx = db.SetContextIdentity(ctx, acpIdentity.New("cosmos1zzg43wdrhmmk89z3pmejwete2kkd4a3vn7w969")) + + privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") + require.NoError(t, err) + privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) + identity := acpIdentity.FromPrivateKey(privKey) + + ctx = db.SetContextIdentity(ctx, identity) policyResult, err := d.AddPolicy(ctx, policy) policyID := policyResult.PolicyID require.NoError(t, err) - require.Equal(t, "fc3a0a39c73949c70a79e02b8d928028e9cbcc772ba801463a6acdcf2f256cd4", policyID) + require.Equal(t, "7bef56a54eae563eafdc48c57cf37075351498ebb5a200f59cf9b8c6f8149606", policyID) schema := fmt.Sprintf(` type User @policy(id: "%s", resource: "user") { diff --git a/tests/clients/cli/wrapper_cli.go b/tests/clients/cli/wrapper_cli.go index 44ed7f22bc..ab3dd9ace4 100644 --- a/tests/clients/cli/wrapper_cli.go +++ b/tests/clients/cli/wrapper_cli.go @@ -12,6 +12,7 @@ package cli import ( "context" + "encoding/hex" "fmt" "io" "strings" @@ -58,8 +59,8 @@ func (w *cliWrapper) executeStream(ctx context.Context, args []string) (io.ReadC args = append(args, "--tx", fmt.Sprintf("%d", tx.ID())) } id := db.GetContextIdentity(ctx) - if id.HasValue() { - args = append(args, "--identity", id.Value().String()) + if id.HasValue() && id.Value().PrivateKey != nil { + args = append(args, "--identity", hex.EncodeToString(id.Value().PrivateKey.Serialize())) } args = append(args, "--url", w.address) diff --git a/tests/integration/acp.go b/tests/integration/acp.go index b02d83eed7..ccfba9ed0d 100644 --- a/tests/integration/acp.go +++ 
b/tests/integration/acp.go @@ -29,7 +29,7 @@ type AddPolicy struct { Policy string // The policy creator identity, i.e. actor creating the policy. - Identity string + Identity immutable.Option[acpIdentity.Identity] // The expected policyID generated based on the Policy loaded in to the ACP system. ExpectedPolicyID string @@ -52,7 +52,7 @@ func addPolicyACP( } for _, node := range getNodes(action.NodeID, s.nodes) { - ctx := db.SetContextIdentity(s.ctx, acpIdentity.New(action.Identity)) + ctx := db.SetContextIdentity(s.ctx, action.Identity) policyResult, err := node.AddPolicy(ctx, action.Policy) if err == nil { diff --git a/tests/integration/acp/add_policy/basic_test.go b/tests/integration/acp/add_policy/basic_test.go index fdf53f02cc..9adc54996b 100644 --- a/tests/integration/acp/add_policy/basic_test.go +++ b/tests/integration/acp/add_policy/basic_test.go @@ -46,7 +46,7 @@ func TestACP_AddPolicy_BasicYAML_ValidPolicyID(t *testing.T) { `, - ExpectedPolicyID: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a", + ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", }, }, } @@ -91,7 +91,7 @@ func TestACP_AddPolicy_BasicJSON_ValidPolicyID(t *testing.T) { } `, - ExpectedPolicyID: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a", + ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", }, }, } diff --git a/tests/integration/acp/add_policy/with_empty_args_test.go b/tests/integration/acp/add_policy/with_empty_args_test.go index dc530d95b0..6b07ef0f67 100644 --- a/tests/integration/acp/add_policy/with_empty_args_test.go +++ b/tests/integration/acp/add_policy/with_empty_args_test.go @@ -13,6 +13,9 @@ package test_acp_add_policy import ( "testing" + "github.com/sourcenetwork/immutable" + + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -42,7 +45,7 @@ func TestACP_AddPolicy_EmptyPolicyCreator_Error(t 
*testing.T) { Actions: []any{ testUtils.AddPolicy{ - Identity: "", + Identity: immutable.None[acpIdentity.Identity](), Policy: ` description: a basic policy that satisfies minimum DPI requirements @@ -80,7 +83,7 @@ func TestACP_AddPolicy_EmptyCreatorAndPolicyArgs_Error(t *testing.T) { Actions: []any{ testUtils.AddPolicy{ - Identity: "", + Identity: immutable.None[acpIdentity.Identity](), Policy: "", diff --git a/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go b/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go index cd84e3d910..3cdfbddab0 100644 --- a/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go +++ b/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go @@ -53,7 +53,7 @@ func TestACP_AddPolicy_ExtraPermissionsAndExtraRelations_ValidPolicyID(t *testin - actor `, - ExpectedPolicyID: "ecfeeebd1b65e6a21b2f1b57006176bcbc6a37ef238f27c7034953f46fe04674", + ExpectedPolicyID: "f29c97dca930c9e93f7ef9e2139c63939c573af96c95af5cb9392861a0111b13", }, }, } diff --git a/tests/integration/acp/add_policy/with_extra_perms_test.go b/tests/integration/acp/add_policy/with_extra_perms_test.go index 8c13555c8d..9b39a3743b 100644 --- a/tests/integration/acp/add_policy/with_extra_perms_test.go +++ b/tests/integration/acp/add_policy/with_extra_perms_test.go @@ -47,7 +47,7 @@ func TestACP_AddPolicy_ExtraPermissions_ValidPolicyID(t *testing.T) { name: actor `, - ExpectedPolicyID: "9d518bb2d5aceb2c8f9b12b909eecd50276c1bd0250069875f265166e6030bb5", + ExpectedPolicyID: "af6795fa4fce1c2b4f9c1252c1cdd758708a45e4fc3097406c008d78c820ee80", }, }, } diff --git a/tests/integration/acp/add_policy/with_extra_relations_test.go b/tests/integration/acp/add_policy/with_extra_relations_test.go index d3e4308c24..acfb848247 100644 --- a/tests/integration/acp/add_policy/with_extra_relations_test.go +++ b/tests/integration/acp/add_policy/with_extra_relations_test.go @@ -51,7 +51,7 @@ func 
TestACP_AddPolicy_ExtraRelations_ValidPolicyID(t *testing.T) { - actor `, - ExpectedPolicyID: "450c47aa47b7b07820f99e5cb38170dc108a2f12b137946e6b47d0c0a73b607f", + ExpectedPolicyID: "922636974ecbc3c335143e45917832f219dfe4a168a523e7314616b94e7f9ebc", }, }, } diff --git a/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go b/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go index 2e56670add..68cad2b361 100644 --- a/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go +++ b/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go @@ -13,17 +13,25 @@ package test_acp_add_policy import ( "testing" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" testUtils "github.com/sourcenetwork/defradb/tests/integration" + + "github.com/sourcenetwork/immutable" ) func TestACP_AddPolicy_InvalidCreatorIdentityWithValidPolicy_Error(t *testing.T) { test := testUtils.TestCase{ + // Using an invalid creator is not possible with other client + // types since the token authentication will fail + SupportedClientTypes: immutable.Some([]testUtils.ClientType{ + testUtils.GoClientType, + }), Description: "Test acp, adding policy, with invalid creator, with valid policy, return error", Actions: []any{ testUtils.AddPolicy{ - Identity: "invalid", + Identity: immutable.Some(acpIdentity.Identity{Address: "invalid"}), Policy: ` description: a basic policy that satisfies minimum DPI requirements @@ -57,12 +65,17 @@ func TestACP_AddPolicy_InvalidCreatorIdentityWithValidPolicy_Error(t *testing.T) func TestACP_AddPolicy_InvalidCreatorIdentityWithEmptyPolicy_Error(t *testing.T) { test := testUtils.TestCase{ + // Using an invalid creator is not possible with other client + // types since the token authentication will fail + SupportedClientTypes: immutable.Some([]testUtils.ClientType{ + testUtils.GoClientType, + }), Description: "Test acp, adding policy, with invalid creator, with empty policy, return error", Actions: []any{ testUtils.AddPolicy{ 
- Identity: "invalid", + Identity: immutable.Some(acpIdentity.Identity{Address: "invalid"}), Policy: "", diff --git a/tests/integration/acp/add_policy/with_managed_relation_test.go b/tests/integration/acp/add_policy/with_managed_relation_test.go index d80c5b1c05..c3bff2c8fd 100644 --- a/tests/integration/acp/add_policy/with_managed_relation_test.go +++ b/tests/integration/acp/add_policy/with_managed_relation_test.go @@ -52,7 +52,7 @@ func TestACP_AddPolicy_WithRelationManagingOtherRelation_ValidPolicyID(t *testin - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, }, } diff --git a/tests/integration/acp/add_policy/with_multi_policies_test.go b/tests/integration/acp/add_policy/with_multi_policies_test.go index 6fbbfb2c39..008f7969e9 100644 --- a/tests/integration/acp/add_policy/with_multi_policies_test.go +++ b/tests/integration/acp/add_policy/with_multi_policies_test.go @@ -46,7 +46,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPolicies_ValidPolicyIDs(t *testing.T) `, - ExpectedPolicyID: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a", + ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", }, testUtils.AddPolicy{ @@ -80,7 +80,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPolicies_ValidPolicyIDs(t *testing.T) - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, }, } @@ -125,7 +125,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPoliciesInDifferentFmts_ValidPolicyID } `, - ExpectedPolicyID: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a", + ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", }, testUtils.AddPolicy{ @@ -159,7 +159,7 @@ func 
TestACP_AddPolicy_AddMultipleDifferentPoliciesInDifferentFmts_ValidPolicyID - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, }, } @@ -198,7 +198,7 @@ func TestACP_AddPolicy_AddDuplicatePolicyByOtherCreator_ValidPolicyIDs(t *testin Policy: policyUsedByBoth, - ExpectedPolicyID: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a", + ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", }, testUtils.AddPolicy{ @@ -206,7 +206,7 @@ func TestACP_AddPolicy_AddDuplicatePolicyByOtherCreator_ValidPolicyIDs(t *testin Policy: policyUsedByBoth, - ExpectedPolicyID: "551c57323f33decfdc23312e5e1036e3ab85d2414e962814dab9101619dd9ff9", + ExpectedPolicyID: "5cff96a89799f7974906138fb794f670d35ac5df9985621da44f9f3529af1c0b", }, }, } @@ -244,7 +244,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePolicies_Error(t *testing.T) { `, - ExpectedPolicyID: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a", + ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", }, testUtils.AddPolicy{ @@ -271,7 +271,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePolicies_Error(t *testing.T) { `, - ExpectedError: "policy dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a: policy exists", + ExpectedError: "policy aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3: policy exists", }, }, } @@ -308,7 +308,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_Error(t *testin - actor `, - ExpectedPolicyID: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a", + ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", }, testUtils.AddPolicy{ @@ -342,7 +342,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_Error(t *testin } `, - ExpectedError: "policy 
dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a: policy exists", + ExpectedError: "policy aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3: policy exists", }, }, } diff --git a/tests/integration/acp/add_policy/with_multiple_resources_test.go b/tests/integration/acp/add_policy/with_multiple_resources_test.go index 6d6c890452..c7ae18f7ed 100644 --- a/tests/integration/acp/add_policy/with_multiple_resources_test.go +++ b/tests/integration/acp/add_policy/with_multiple_resources_test.go @@ -62,7 +62,7 @@ func TestACP_AddPolicy_MultipleResources_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "cf082c11fa812dddaa5093f0ccae66c2b5294efe0a2b50ffdcbc0185adf6adf1", + ExpectedPolicyID: "390239e42550ea5945b9185576b79694f7000a7ce3b301d60afe35572c958cd7", }, }, } diff --git a/tests/integration/acp/add_policy/with_no_perms_test.go b/tests/integration/acp/add_policy/with_no_perms_test.go index 156788ca45..b3b7faa307 100644 --- a/tests/integration/acp/add_policy/with_no_perms_test.go +++ b/tests/integration/acp/add_policy/with_no_perms_test.go @@ -50,7 +50,7 @@ func TestACP_AddPolicy_NoPermissionsOnlyOwner_ValidID(t *testing.T) { `, - ExpectedPolicyID: "b6edfd9d24a79067a2f5960e1369499ebaf4c5ec6747e2f444f33bf9c3915fcb", + ExpectedPolicyID: "39b436f0c28e7ce5ed7e1c592bb578590d62ccfacef0df565ac97520c880c017", }, }, } @@ -87,7 +87,7 @@ func TestACP_AddPolicy_NoPermissionsMultiRelations_ValidID(t *testing.T) { `, - ExpectedPolicyID: "7eb7448daa631cfe33da3a149f5eea716026f54bf23ce1315c594259382c5c57", + ExpectedPolicyID: "07da6260811df769d551e89e02364b3e939cb585696c1a69b626bb8ecdd378f9", }, }, } @@ -119,7 +119,7 @@ func TestACP_AddPolicy_NoPermissionsLabelOnlyOwner_ValidID(t *testing.T) { `, - ExpectedPolicyID: "b6edfd9d24a79067a2f5960e1369499ebaf4c5ec6747e2f444f33bf9c3915fcb", + ExpectedPolicyID: "39b436f0c28e7ce5ed7e1c592bb578590d62ccfacef0df565ac97520c880c017", }, }, } @@ -154,7 +154,7 @@ func 
TestACP_AddPolicy_NoPermissionsLabelMultiRelations_ValidID(t *testing.T) { `, - ExpectedPolicyID: "7eb7448daa631cfe33da3a149f5eea716026f54bf23ce1315c594259382c5c57", + ExpectedPolicyID: "07da6260811df769d551e89e02364b3e939cb585696c1a69b626bb8ecdd378f9", }, }, } diff --git a/tests/integration/acp/add_policy/with_no_resources_test.go b/tests/integration/acp/add_policy/with_no_resources_test.go index 6b4097584a..8bd6e5268d 100644 --- a/tests/integration/acp/add_policy/with_no_resources_test.go +++ b/tests/integration/acp/add_policy/with_no_resources_test.go @@ -36,7 +36,7 @@ func TestACP_AddPolicy_NoResource_ValidID(t *testing.T) { resources: `, - ExpectedPolicyID: "b72d8ec56ffb141922781d2b1b0803404bef57be0eeec98f1662f3017fc2de35", + ExpectedPolicyID: "e16824022121b55f2b2babbd2ab82960a8837767197e20acf9c577cbb4539991", }, }, } @@ -62,7 +62,7 @@ func TestACP_AddPolicy_NoResourceLabel_ValidID(t *testing.T) { name: actor `, - ExpectedPolicyID: "b72d8ec56ffb141922781d2b1b0803404bef57be0eeec98f1662f3017fc2de35", + ExpectedPolicyID: "e16824022121b55f2b2babbd2ab82960a8837767197e20acf9c577cbb4539991", }, }, } @@ -83,7 +83,7 @@ func TestACP_AddPolicy_PolicyWithOnlySpace_ValidID(t *testing.T) { Policy: " ", - ExpectedPolicyID: "b72d8ec56ffb141922781d2b1b0803404bef57be0eeec98f1662f3017fc2de35", + ExpectedPolicyID: "e16824022121b55f2b2babbd2ab82960a8837767197e20acf9c577cbb4539991", }, }, } diff --git a/tests/integration/acp/add_policy/with_perm_expr_test.go b/tests/integration/acp/add_policy/with_perm_expr_test.go index 177de98ebe..fd31e30840 100644 --- a/tests/integration/acp/add_policy/with_perm_expr_test.go +++ b/tests/integration/acp/add_policy/with_perm_expr_test.go @@ -48,7 +48,7 @@ func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithMinus_ValidID(t *testi - actor `, - ExpectedPolicyID: "d74384d99b6732c3a6e0e47c7b75ea19553f643bcca416380530d8ad4e50e529", + ExpectedPolicyID: "fcb989d8bad149e3c4b22f8a69969760187b29ea1c796a3f9d2e16e32f493590", }, }, } @@ -89,7 +89,7 @@ 
func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithMinusNoSpace_ValidID(t - actor `, - ExpectedPolicyID: "f6d5d6d8b0183230fcbdf06cfe14b611f782752d276006ad4622231eeaf60820", + ExpectedPolicyID: "50d8fbaf70a08c2c0e2bf0355a353a8bb06cc4d6e2f3ddbf71d91f9ef5aa49af", }, }, } diff --git a/tests/integration/acp/add_policy/with_permissionless_owner_test.go b/tests/integration/acp/add_policy/with_permissionless_owner_test.go index 0fda8a7468..c6ada1c121 100644 --- a/tests/integration/acp/add_policy/with_permissionless_owner_test.go +++ b/tests/integration/acp/add_policy/with_permissionless_owner_test.go @@ -55,7 +55,7 @@ func TestACP_AddPolicy_PermissionlessOwnerWrite_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "af1ee9ffe8558da8455dc1cfc5897028c16c038a053b4cf740dfcef8032d944a", + ExpectedPolicyID: "f7e7b84108ba67bcdeb211ff740eee13b2b6770106dcf0d0436a3a33d8a2f9f0", }, }, } @@ -95,7 +95,7 @@ func TestACP_AddPolicy_PermissionlessOwnerRead_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "3ceb4a4be889998496355604b68836bc280dc26dab829af3ec45b63d7767a7f1", + ExpectedPolicyID: "22c3eee3b3d216c01244a47a6aa241a08b767b3ef0a9edfbd30b3575a6bd94f4", }, }, } @@ -135,7 +135,7 @@ func TestACP_AddPolicy_PermissionlessOwnerReadWrite_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "af1ee9ffe8558da8455dc1cfc5897028c16c038a053b4cf740dfcef8032d944a", + ExpectedPolicyID: "f7e7b84108ba67bcdeb211ff740eee13b2b6770106dcf0d0436a3a33d8a2f9f0", }, }, } diff --git a/tests/integration/acp/add_policy/with_unused_relations_test.go b/tests/integration/acp/add_policy/with_unused_relations_test.go index fbcec10755..27149ede0c 100644 --- a/tests/integration/acp/add_policy/with_unused_relations_test.go +++ b/tests/integration/acp/add_policy/with_unused_relations_test.go @@ -49,7 +49,7 @@ func TestACP_AddPolicy_UnusedRelation_ValidID(t *testing.T) { `, - ExpectedPolicyID: "e1bb7702f653d4f9a0595d2d97c209fc0da8f315be007bd19545599eed41ae42", + ExpectedPolicyID: 
"a6d42bfedff5db1feca0313793e4f9540851e3feaefffaebc98a1ee5bb140e45", }, }, } diff --git a/tests/integration/acp/fixture.go b/tests/integration/acp/fixture.go index ae05f780a4..c2a0c8f74b 100644 --- a/tests/integration/acp/fixture.go +++ b/tests/integration/acp/fixture.go @@ -10,7 +10,26 @@ package test_acp +import ( + "encoding/hex" + + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/sourcenetwork/immutable" + + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" +) + var ( - Actor1Identity = "cosmos1zzg43wdrhmmk89z3pmejwete2kkd4a3vn7w969" - Actor2Identity = "cosmos1x25hhksxhu86r45hqwk28dd70qzux3262hdrll" + Actor1Identity = MustParseIdentity("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") + Actor2Identity = MustParseIdentity("4d092126012ebaf56161716018a71630d99443d9d5217e9d8502bb5c5456f2c5") ) + +// MustParseIdentity returns an identity that uses the given private key or panics. +func MustParseIdentity(privateKeyHex string) immutable.Option[acpIdentity.Identity] { + privateKeyBytes, err := hex.DecodeString(privateKeyHex) + if err != nil { + panic(err) + } + privateKey := secp256k1.PrivKeyFromBytes(privateKeyBytes) + return acpIdentity.FromPrivateKey(privateKey) +} diff --git a/tests/integration/acp/index/create_test.go b/tests/integration/acp/index/create_test.go index 9c440e25e2..8d6fc4de3f 100644 --- a/tests/integration/acp/index/create_test.go +++ b/tests/integration/acp/index/create_test.go @@ -25,13 +25,13 @@ func TestACP_IndexCreateWithSeparateRequest_OnCollectionWithPolicy_NoError(t *te testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: 
"a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -71,13 +71,13 @@ func TestACP_IndexCreateWithDirective_OnCollectionWithPolicy_NoError(t *testing. testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @index diff --git a/tests/integration/acp/index/fixture.go b/tests/integration/acp/index/fixture.go index 49c76e8b22..0f0af7ba1c 100644 --- a/tests/integration/acp/index/fixture.go +++ b/tests/integration/acp/index/fixture.go @@ -10,7 +10,7 @@ package test_acp_index -// policy id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001" +// policy id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd" const userPolicy = ` description: a test policy which marks a collection in a database as a resource @@ -39,7 +39,7 @@ resources: - actor ` -// policy id: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c" +// policy id: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a" const bookAuthorPolicy = ` description: a test policy which marks a collection in a database as a resource diff --git a/tests/integration/acp/index/query_test.go b/tests/integration/acp/index/query_test.go index a7c09cd9e9..6ce7fdc1a2 100644 --- a/tests/integration/acp/index/query_test.go +++ b/tests/integration/acp/index/query_test.go @@ -24,12 +24,12 @@ func TestACPWithIndex_UponQueryingPrivateDocWithoutIdentity_ShouldNotFetch(t *te testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: 
"53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @index @@ -78,12 +78,12 @@ func TestACPWithIndex_UponQueryingPrivateDocWithIdentity_ShouldFetch(t *testing. testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @index @@ -136,12 +136,12 @@ func TestACPWithIndex_UponQueryingPrivateDocWithWrongIdentity_ShouldNotFetch(t * testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @index diff --git a/tests/integration/acp/index/query_with_relation_test.go b/tests/integration/acp/index/query_with_relation_test.go index 614aaa6e84..8a0d8c7d74 100644 --- a/tests/integration/acp/index/query_with_relation_test.go +++ b/tests/integration/acp/index/query_with_relation_test.go @@ -22,12 +22,12 @@ func createAuthorBooksSchemaWithPolicyAndCreateDocs() []any { testUtils.AddPolicy{ 
Identity: acpUtils.Actor1Identity, Policy: bookAuthorPolicy, - ExpectedPolicyID: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c", + ExpectedPolicyID: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a", }, testUtils.SchemaUpdate{ Schema: ` type Author @policy( - id: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c", + id: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a", resource: "author" ) { name: String @@ -37,7 +37,7 @@ func createAuthorBooksSchemaWithPolicyAndCreateDocs() []any { } type Book @policy( - id: "68a4e64d5034b8a0565a90cd36483de0d61e0ea2450cf57c1fa8d27cbbf17c2c", + id: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a", resource: "author" ) { name: String diff --git a/tests/integration/acp/p2p/replicator_test.go b/tests/integration/acp/p2p/replicator_test.go index 9c3b0eca3f..ace55f6a06 100644 --- a/tests/integration/acp/p2p/replicator_test.go +++ b/tests/integration/acp/p2p/replicator_test.go @@ -62,13 +62,13 @@ func TestACP_P2POneToOneReplicatorWithPermissionedCollection_Error(t *testing.T) - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String diff --git a/tests/integration/acp/p2p/subscribe_test.go b/tests/integration/acp/p2p/subscribe_test.go index 610339d24f..c41039e380 100644 --- a/tests/integration/acp/p2p/subscribe_test.go +++ b/tests/integration/acp/p2p/subscribe_test.go @@ -62,13 +62,13 @@ func TestACP_P2PSubscribeAddGetSingleWithPermissionedCollection_Error(t *testing - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: 
"a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String diff --git a/tests/integration/acp/query/fixture.go b/tests/integration/acp/query/fixture.go index ed81ed0633..2b05b48232 100644 --- a/tests/integration/acp/query/fixture.go +++ b/tests/integration/acp/query/fixture.go @@ -58,13 +58,13 @@ func getSetupEmployeeCompanyActions() []any { testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: employeeCompanyPolicy, - ExpectedPolicyID: "67607eb2a2a873f4a69eb6876323cee7601d8a4d4fedcc18154aaee65cf38e7f", + ExpectedPolicyID: "6f11799717723307077147736fddccd8a7b5e68d2ec22e2155f0186e0c43a2e2", }, testUtils.SchemaUpdate{ Schema: ` type Employee @policy( - id: "67607eb2a2a873f4a69eb6876323cee7601d8a4d4fedcc18154aaee65cf38e7f", + id: "6f11799717723307077147736fddccd8a7b5e68d2ec22e2155f0186e0c43a2e2", resource: "employees" ) { name: String @@ -73,7 +73,7 @@ func getSetupEmployeeCompanyActions() []any { } type Company @policy( - id: "67607eb2a2a873f4a69eb6876323cee7601d8a4d4fedcc18154aaee65cf38e7f", + id: "6f11799717723307077147736fddccd8a7b5e68d2ec22e2155f0186e0c43a2e2", resource: "companies" ) { name: String diff --git a/tests/integration/acp/register_and_delete_test.go b/tests/integration/acp/register_and_delete_test.go index 5d0baf0762..ef4e80ec22 100644 --- a/tests/integration/acp/register_and_delete_test.go +++ b/tests/integration/acp/register_and_delete_test.go @@ -58,13 +58,13 @@ func TestACP_CreateWithoutIdentityAndDeleteWithoutIdentity_CanDelete(t *testing. 
- actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -151,13 +151,13 @@ func TestACP_CreateWithoutIdentityAndDeleteWithIdentity_CanDelete(t *testing.T) - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -245,13 +245,13 @@ func TestACP_CreateWithIdentityAndDeleteWithIdentity_CanDelete(t *testing.T) { - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -343,13 +343,13 @@ func TestACP_CreateWithIdentityAndDeleteWithoutIdentity_CanNotDelete(t *testing. 
- actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -449,13 +449,13 @@ func TestACP_CreateWithIdentityAndDeleteWithWrongIdentity_CanNotDelete(t *testin - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String diff --git a/tests/integration/acp/register_and_read_test.go b/tests/integration/acp/register_and_read_test.go index a2620b82d7..0ad80c4953 100644 --- a/tests/integration/acp/register_and_read_test.go +++ b/tests/integration/acp/register_and_read_test.go @@ -54,13 +54,13 @@ func TestACP_CreateWithoutIdentityAndReadWithoutIdentity_CanRead(t *testing.T) { - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -142,13 +142,13 @@ func TestACP_CreateWithoutIdentityAndReadWithIdentity_CanRead(t *testing.T) { - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: 
"a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -232,13 +232,13 @@ func TestACP_CreateWithIdentityAndReadWithIdentity_CanRead(t *testing.T) { - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -324,13 +324,13 @@ func TestACP_CreateWithIdentityAndReadWithoutIdentity_CanNotRead(t *testing.T) { - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -408,13 +408,13 @@ func TestACP_CreateWithIdentityAndReadWithWrongIdentity_CanNotRead(t *testing.T) - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String diff --git a/tests/integration/acp/register_and_update_test.go 
b/tests/integration/acp/register_and_update_test.go index 96810409db..4a51aa6aec 100644 --- a/tests/integration/acp/register_and_update_test.go +++ b/tests/integration/acp/register_and_update_test.go @@ -60,13 +60,13 @@ func TestACP_CreateWithoutIdentityAndUpdateWithoutIdentity_CanUpdate(t *testing. - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -165,13 +165,13 @@ func TestACP_CreateWithoutIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -271,13 +271,13 @@ func TestACP_CreateWithIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) { - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -387,13 +387,13 @@ func TestACP_CreateWithIdentityAndUpdateWithoutIdentity_CanNotUpdate(t *testing. 
- actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -505,13 +505,13 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentity_CanNotUpdate(t *testin - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -624,13 +624,13 @@ func TestACP_CreateWithIdentityAndUpdateWithoutIdentityGQL_CanNotUpdate(t *testi - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String @@ -741,13 +741,13 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentityGQL_CanNotUpdate(t *tes - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" 
) { name: String diff --git a/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go b/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go index cfc668a25c..a7e9c835f2 100644 --- a/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_BasicYAML_SchemaAccepted(t *testing.T) { - policyIDOfValidDPI := "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a" + policyIDOfValidDPI := "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" test := testUtils.TestCase{ @@ -113,7 +113,7 @@ func TestACP_AddDPISchema_BasicYAML_SchemaAccepted(t *testing.T) { } func TestACP_AddDPISchema_BasicJSON_SchemaAccepted(t *testing.T) { - policyIDOfValidDPI := "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a" + policyIDOfValidDPI := "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go index 348736a58c..275caf0864 100644 --- a/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelation_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "16e39e650d4cbd5161ae0c572edad6f7e2950c1c4afa37e427af3c8708e68f0f" + policyIDOfValidDPI := "c137c80b1ad0fc52aa183c3b43dff62d1eefdd04cb0f49ca6a646b545843eece" test := testUtils.TestCase{ @@ -118,7 +118,7 @@ func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelation_AcceptSchema(t *t } func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelationInTheEnd_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := 
"35b6f3db54cfb0f451a4faba77d2c71d8718215caeb5a15a8570dfdba07b694d" + policyIDOfValidDPI := "053f118041543b324f127a57a19e29c26aa95af8fa732ded2cf80e8dd96fa2d3" test := testUtils.TestCase{ @@ -217,7 +217,7 @@ func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelationInTheEnd_AcceptSch } func TestACP_AddDPISchema_WithExtraPermsHavingNoRequiredRelation_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "7b6266a93bfb6920bf57884f55c3823a5a5147c4ce445a9fc703b7c1e59b2d12" + policyIDOfValidDPI := "b1758de0d20726e53c9c343382af0f834ed6a10381f96399ce7c39fab607c349" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go index 72f622201f..c7b168aea7 100644 --- a/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_WithManagedRelation_AcceptSchemas(t *testing.T) { - policyIDOfValidDPI := "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001" + policyIDOfValidDPI := "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go index a991d4b280..5c4f39c034 100644 --- a/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_PartialValidDPIButUseOnlyValidDPIResource_AcceptSchema(t *testing.T) { - policyIDOfPartiallyValidDPI := "d5d411825b2d8fa5a550f1e34153b88b375ed9c9af19ce6d2ba1769e237a45d0" + policyIDOfPartiallyValidDPI := "bfda7dc76b4719a32ff2ef6691646501d14fb139518ff6c05d4be1825b9128ed" 
test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go b/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go index 0ec43706ee..25e41408cf 100644 --- a/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go @@ -43,8 +43,8 @@ func TestACP_AddDPISchema_AddDuplicateDPIsByOtherCreatorsUseBoth_AcceptSchema(t - actor ` - const policyIDOfFirstCreatorsDPI string = "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" - const policyIDOfSecondCreatorsDPI string = "d33aa07a28ea19ed07a5256eb7e7f5600b0e0af13254889a7fce60202c4f6c7e" + const policyIDOfFirstCreatorsDPI string = "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + const policyIDOfSecondCreatorsDPI string = "6d2ec2fd16ed62a1cad05d8e791abe12cbbf9551080c0ca052336b49e635c291" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go index 9903bc18d2..97f96d1acd 100644 --- a/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_WithMultipleResources_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "f3e521de628fa607ba11af0e9b53e2fb74ca0e6ea33622003d1f43dbae0ce41d" + policyIDOfValidDPI := "6209c5d12fce1fb0db4140ffa9d6b14a4d38133f601ab83f12dbb6ef84ee7da3" test := testUtils.TestCase{ @@ -128,7 +128,7 @@ func TestACP_AddDPISchema_WithMultipleResources_AcceptSchema(t *testing.T) { } func TestACP_AddDPISchema_WithMultipleResourcesBothBeingUsed_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "f3e521de628fa607ba11af0e9b53e2fb74ca0e6ea33622003d1f43dbae0ce41d" + policyIDOfValidDPI := "6209c5d12fce1fb0db4140ffa9d6b14a4d38133f601ab83f12dbb6ef84ee7da3" test := 
testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go b/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go index 086a69a1b1..aaef9d915d 100644 --- a/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_UseSameResourceOnDifferentSchemas_AcceptSchemas(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" sharedSameResourceName := "users" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go b/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go index b23f47e19b..8174ccf7bc 100644 --- a/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go @@ -17,7 +17,7 @@ import ( ) func TestACP_AddDPISchema_NoArgWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ @@ -91,7 +91,7 @@ func TestACP_AddDPISchema_NoArgWasSpecifiedOnSchema_SchemaRejected(t *testing.T) } func TestACP_AddDPISchema_SpecifiedArgsAreEmptyOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go 
b/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go index 94b3fd2dde..028b42f4ac 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_InvalidPolicyIDArgTypeWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ @@ -92,7 +92,7 @@ func TestACP_AddDPISchema_InvalidPolicyIDArgTypeWasSpecifiedOnSchema_SchemaRejec } func TestACP_AddDPISchema_InvalidResourceArgTypeWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go index 540222d37b..a6140d23d2 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "782ffee730033ff01a3bdb05a3aa130f08c0914887378b0dfee314be6c3a8dd0" + policyIDOfInvalidDPI := "d3b58e284fb1117eb9c0111105195c910a6873d70695d8d3af3a967632bd5a5c" test := testUtils.TestCase{ @@ -102,7 +102,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionOnDPI_SchemaRejected } func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionLabelOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI 
:= "62d2d65d0304cb9a16bb4f07d1f48c7142911f73bc1db6ee54cdd2c6c7949c73" + policyIDOfInvalidDPI := "6f81aa1e33a6888e041ed7686f21007d09b30ae218fb2ce19071ea2155482b23" test := testUtils.TestCase{ @@ -185,7 +185,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionLabelOnDPI_SchemaRej } func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "f9fe33e8b2ee18a65d16bdc8017fe829ec13b0797330422639cd9dafac7b00f8" + policyIDOfInvalidDPI := "7f291632f6772e108830d41a5388391fc04cd4f2473d2a759d2a3326ee973848" test := testUtils.TestCase{ @@ -269,7 +269,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionExprOnDPI_Sch } func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionNoSpaceExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "08cc6bed6b9695dd47b6bf1e934ff91975db598631a55c26db9ead1393a77588" + policyIDOfInvalidDPI := "aa4c2b7bc9177a7b1cb808289a7f03564c489cc5c829ef756997cbe47b689a3f" test := testUtils.TestCase{ @@ -353,7 +353,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionNoSpaceExprOn } func TestACP_AddDPISchema_MaliciousOwnerSpecifiedOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "fff5c6fc25fbc2a9e5a7251c19b1cb950889281d656e5aeb642ce7c16f181c9b" + policyIDOfInvalidDPI := "f52f9a23643c6d52f4aaf2569885c152f06edc7d95647f88f6c4e8ead757e792" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go index 29ec5a9ecf..5aaedbdcf3 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func 
TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "c9bb1811862ded3a4a8a931054bd99ecabde3f41231c6aa2c50e1f1f5af2b5e8" + policyIDOfInvalidDPI := "646063232aca1ae245d59f64e192436e1e843008f496c21eb4070d7d765f6f10" test := testUtils.TestCase{ @@ -103,7 +103,7 @@ func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnReadPermissionExprOn } func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "7bff1d8a967df4de99f8daaa2567c660eb6e7b2c554c9a49bf831230e5d9eba6" + policyIDOfInvalidDPI := "3252d478a953afc01782253abd47ad644e2784911ad4418acd802c9002a72c5a" test := testUtils.TestCase{ @@ -188,7 +188,7 @@ func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnReadPermissionExpr } func TestACP_AddDPISchema_OwnerRelationWithInvalidSetOpOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "cc2fab7c299e94e2bd9370708d26ca1262ff3b0d75f9a58d1086658cfec26c65" + policyIDOfInvalidDPI := "e4120157eaf6546994d7301deacb3f313ba7240a90dba9aeaa7b1227292f39cb" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go index f3b5877444..36fad20c9b 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "4256d2b54767cafd0e0a2b39a6faebf44bc99a7fc74ff5b51894f7accf2ef638" + policyIDOfInvalidDPI := "25bfbcab338ce03e1312b7a4dd78f4574156b5ca626c8f894ed101c81d2d31c8" test := testUtils.TestCase{ @@ -102,7 +102,7 @@ func 
TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionOnDPI_SchemaRejecte } func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionLabelOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "e8be944571cd6b52faa1e8b75fa339a9f60065b65d78ed126d037722e2512593" + policyIDOfInvalidDPI := "f0c8fc28378030d268ccdab9931e788d8839e0640085e43c1d48deebfe859f34" test := testUtils.TestCase{ @@ -185,7 +185,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionLabelOnDPI_SchemaRe } func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "34ff30cb9e80993e2b11f86f85c6daa7cd9bf25724e4d5ff0704518d7970d074" + policyIDOfInvalidDPI := "e94d4ca7705564e39014bec198c3e20f5fae86c4dd44297711bd6a2257e579dc" test := testUtils.TestCase{ @@ -269,7 +269,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionExprOnDPI_Sc } func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionNoSpaceExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "2e9fc5805b0442e856e9893fea0f4759d333e442856a230ed741b88670e6426c" + policyIDOfInvalidDPI := "2d185da7b2d40981ce325d71d1d21dbae87690a461d7cb5c4ac753ad213607a3" test := testUtils.TestCase{ @@ -353,7 +353,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionNoSpaceExprO } func TestACP_AddDPISchema_MaliciousOwnerSpecifiedOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "3bcd650ac1e69d5efe6c930d05420231a0a69e6018d0f1015e0ecef9869d8dd5" + policyIDOfInvalidDPI := "2ad2dcd971d0e358a8d231970a3aa71b5cd94d38a85034c5cef10cd1c9fd5895" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go index 96ff618123..07d719b920 100644 --- 
a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "2e14b379df6008ba577a11ac47d59c09eb0146afc5453e1ac0f40178ac3f5720" + policyIDOfInvalidDPI := "e3fa9d82173e212e4d13e4f96b521dba16644c5dd1b07518fe2e93391ca1c087" test := testUtils.TestCase{ @@ -103,7 +103,7 @@ func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnWritePermissionExprO } func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "143546c4da209d67466690bf749899c37cd956f64c128ea7cca0662688f832ac" + policyIDOfInvalidDPI := "5c12fb0c1d7e108f7144f9e21c830478f80d5a4b0108b35fefa17ad83f4aac15" test := testUtils.TestCase{ @@ -188,7 +188,7 @@ func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnWritePermissionExp } func TestACP_AddDPISchema_OwnerRelationWithInvalidSetOpOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "b9b4e941be904b0472ab6031628ce08ae4f87314e68972a6cfc114ed449820a4" + policyIDOfInvalidDPI := "3c341a310b63bb689cf501598409f2a07a4b1798057d5634b8c47fe1efd094c9" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go index c59008edf5..4aa2acb7bc 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_WhereNoPolicyWasAdded_SchemaRejected(t *testing.T) { - nonExistingPolicyID := "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a" + nonExistingPolicyID := 
"aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" test := testUtils.TestCase{ @@ -68,8 +68,8 @@ func TestACP_AddDPISchema_WhereNoPolicyWasAdded_SchemaRejected(t *testing.T) { } func TestACP_AddDPISchema_WhereAPolicyWasAddedButLinkedPolicyWasNotAdded_SchemaRejected(t *testing.T) { - policyAdded := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" - incorrectPolicyID := "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a" + policyAdded := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + incorrectPolicyID := "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go index 7a5942f2e6..9f3d90d203 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go @@ -17,7 +17,7 @@ import ( ) func TestACP_AddDPISchema_NoPolicyIDWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ @@ -91,7 +91,7 @@ func TestACP_AddDPISchema_NoPolicyIDWasSpecifiedOnSchema_SchemaRejected(t *testi } func TestACP_AddDPISchema_SpecifiedPolicyIDArgIsEmptyOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go index 16c6eb1024..22fb305b7b 100644 --- 
a/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_MissingRequiredReadPermissionOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "7eb7448daa631cfe33da3a149f5eea716026f54bf23ce1315c594259382c5c57" + policyIDOfInvalidDPI := "07da6260811df769d551e89e02364b3e939cb585696c1a69b626bb8ecdd378f9" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go index 45635eae15..d8d67af51f 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_NoResourceWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ @@ -94,7 +94,7 @@ func TestACP_AddDPISchema_NoResourceWasSpecifiedOnSchema_SchemaRejected(t *testi } func TestACP_AddDPISchema_SpecifiedResourceArgIsEmptyOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go index 3d50f1c2a7..8261c6759f 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go @@ -18,7 +18,7 @@ 
import ( ) func TestACP_AddDPISchema_SpecifiedResourceDoesNotExistOnDPI_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" + policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go index ba9e06a2b6..b2bcbbf6ae 100644 --- a/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_PartialValidDPIButUseInValidDPIResource_RejectSchema(t *testing.T) { - policyIDOfPartiallyValidDPI := "d5d411825b2d8fa5a550f1e34153b88b375ed9c9af19ce6d2ba1769e237a45d0" + policyIDOfPartiallyValidDPI := "bfda7dc76b4719a32ff2ef6691646501d14fb139518ff6c05d4be1825b9128ed" test := testUtils.TestCase{ diff --git a/tests/integration/collection_description/updates/remove/policy_test.go b/tests/integration/collection_description/updates/remove/policy_test.go index 1d4de5d399..b52b8112c4 100644 --- a/tests/integration/collection_description/updates/remove/policy_test.go +++ b/tests/integration/collection_description/updates/remove/policy_test.go @@ -52,13 +52,13 @@ func TestColDescrUpdateRemovePolicy_Errors(t *testing.T) { - actor `, - ExpectedPolicyID: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "53980e762616fcffbe76307995895e862f87ef3f21d509325d1dc772a770b001", + id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", resource: "users" ) { name: String diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index 
b2f3368339..bea260c773 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -16,6 +16,7 @@ import ( "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" + acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/net" "github.com/sourcenetwork/defradb/tests/gen" @@ -218,7 +219,7 @@ type CreateDoc struct { // // If an Identity is provided and the collection has a policy, then the // created document(s) will be owned by this Identity. - Identity string + Identity immutable.Option[acpIdentity.Identity] // The collection in which this document should be created. CollectionID int @@ -247,7 +248,7 @@ type DeleteDoc struct { // // If an Identity is provided and the collection has a policy, then // can also delete private document(s) that are owned by this Identity. - Identity string + Identity immutable.Option[acpIdentity.Identity] // The collection in which this document should be deleted. CollectionID int @@ -280,7 +281,7 @@ type UpdateDoc struct { // // If an Identity is provided and the collection has a policy, then // can also update private document(s) that are owned by this Identity. - Identity string + Identity immutable.Option[acpIdentity.Identity] // The collection in which this document exists. CollectionID int @@ -434,7 +435,7 @@ type Request struct { // // If an Identity is provided and the collection has a policy, then can // operate over private document(s) that are owned by this Identity. - Identity string + Identity immutable.Option[acpIdentity.Identity] // Used to identify the transaction for this to run against. Optional. 
TransactionID immutable.Option[int] diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index f5e6156707..708e14450b 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/crypto" "github.com/sourcenetwork/defradb/datastore" @@ -851,7 +850,7 @@ func refreshDocuments( continue } - ctx := db.SetContextIdentity(s.ctx, acpIdentity.New(action.Identity)) + ctx := db.SetContextIdentity(s.ctx, action.Identity) // The document may have been mutated by other actions, so to be sure we have the latest // version without having to worry about the individual update mechanics we fetch it. doc, err = collection.Get(ctx, doc.ID(), false) @@ -1206,7 +1205,7 @@ func createDocViaColSave( txn := getTransaction(s, node, immutable.None[int](), action.ExpectedError) ctx := db.SetContextTxn(s.ctx, txn) - ctx = db.SetContextIdentity(ctx, acpIdentity.New(action.Identity)) + ctx = db.SetContextIdentity(ctx, action.Identity) return doc, collections[action.CollectionID].Save(ctx, doc) } @@ -1226,7 +1225,7 @@ func createDocViaColCreate( txn := getTransaction(s, node, immutable.None[int](), action.ExpectedError) ctx := db.SetContextTxn(s.ctx, txn) - ctx = db.SetContextIdentity(ctx, acpIdentity.New(action.Identity)) + ctx = db.SetContextIdentity(ctx, action.Identity) return doc, collections[action.CollectionID].Create(ctx, doc) } @@ -1255,7 +1254,7 @@ func createDocViaGQL( txn := getTransaction(s, node, immutable.None[int](), action.ExpectedError) ctx := db.SetContextTxn(s.ctx, txn) - ctx = db.SetContextIdentity(ctx, acpIdentity.New(action.Identity)) + ctx = db.SetContextIdentity(ctx, action.Identity) result := node.ExecRequest( ctx, @@ -1287,7 +1286,7 @@ func deleteDoc( action DeleteDoc, ) { doc := 
s.documents[action.CollectionID][action.DocID] - ctx := db.SetContextIdentity(s.ctx, acpIdentity.New(action.Identity)) + ctx := db.SetContextIdentity(s.ctx, action.Identity) var expectedErrorRaised bool actionNodes := getNodes(action.NodeID, s.nodes) @@ -1345,7 +1344,7 @@ func updateDocViaColSave( collections []client.Collection, ) error { cachedDoc := s.documents[action.CollectionID][action.DocID] - ctx := db.SetContextIdentity(s.ctx, acpIdentity.New(action.Identity)) + ctx := db.SetContextIdentity(s.ctx, action.Identity) doc, err := collections[action.CollectionID].Get(ctx, cachedDoc.ID(), true) if err != nil { @@ -1372,7 +1371,7 @@ func updateDocViaColUpdate( collections []client.Collection, ) error { cachedDoc := s.documents[action.CollectionID][action.DocID] - ctx := db.SetContextIdentity(s.ctx, acpIdentity.New(action.Identity)) + ctx := db.SetContextIdentity(s.ctx, action.Identity) doc, err := collections[action.CollectionID].Get(ctx, cachedDoc.ID(), true) if err != nil { @@ -1415,7 +1414,7 @@ func updateDocViaGQL( txn := getTransaction(s, node, immutable.None[int](), action.ExpectedError) ctx := db.SetContextTxn(s.ctx, txn) - ctx = db.SetContextIdentity(ctx, acpIdentity.New(action.Identity)) + ctx = db.SetContextIdentity(ctx, action.Identity) result := node.ExecRequest(ctx, request) if len(result.GQL.Errors) > 0 { @@ -1635,7 +1634,7 @@ func executeRequest( txn := getTransaction(s, node, action.TransactionID, action.ExpectedError) ctx := db.SetContextTxn(s.ctx, txn) - ctx = db.SetContextIdentity(ctx, acpIdentity.New(action.Identity)) + ctx = db.SetContextIdentity(ctx, action.Identity) result := node.ExecRequest(ctx, action.Request) From 2aeddc102be3874f1ad365b8ccc8a400320773d5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 2 Jun 2024 01:46:54 -0400 Subject: [PATCH 24/78] bot: Update dependencies (bulk dependabot PRs) 01-06-2024 (#2660) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by combining the following PRs: #2654 bot: Bump @types/react from 18.3.2 to 18.3.3 in /playground #2653 bot: Bump @vitejs/plugin-react-swc from 3.6.0 to 3.7.0 in /playground #2651 bot: Bump go.opentelemetry.io/otel/sdk/metric from 1.26.0 to 1.27.0 ⚠️ The following PR was resolved manually due to merge conflicts: #2650 bot: Bump go.opentelemetry.io/otel/metric from 1.26.0 to 1.27.0 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- go.mod | 10 ++-- go.sum | 20 +++---- playground/package-lock.json | 112 +++++++++++++++++------------------ playground/package.json | 4 +- 4 files changed, 73 insertions(+), 73 deletions(-) diff --git a/go.mod b/go.mod index 6b166e3369..f3935f7ce9 100644 --- a/go.mod +++ b/go.mod @@ -50,8 +50,8 @@ require ( github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 github.com/zalando/go-keyring v0.2.4 - go.opentelemetry.io/otel/metric v1.26.0 - go.opentelemetry.io/otel/sdk/metric v1.26.0 + go.opentelemetry.io/otel/metric v1.27.0 + go.opentelemetry.io/otel/sdk/metric v1.27.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 golang.org/x/term v0.20.0 @@ -293,9 +293,9 @@ require ( github.com/zondax/ledger-go v0.14.3 // indirect go.etcd.io/bbolt v1.3.8 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.26.0 // indirect - go.opentelemetry.io/otel/sdk v1.26.0 // indirect - go.opentelemetry.io/otel/trace v1.26.0 // indirect + go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/otel/sdk v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect diff --git a/go.sum b/go.sum index 
508d0fa72f..04c0313b04 100644 --- a/go.sum +++ b/go.sum @@ -1205,16 +1205,16 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= -go.opentelemetry.io/otel/sdk/metric v1.26.0 h1:cWSks5tfriHPdWFnl+qpX3P681aAYqlZHcAyHw5aU9Y= -go.opentelemetry.io/otel/sdk/metric v1.26.0/go.mod h1:ClMFFknnThJCksebJwz7KIyEDHO+nTB6gK8obLy8RyE= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod 
h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= diff --git a/playground/package-lock.json b/playground/package-lock.json index d8a5fcccac..b8d7b737cb 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -15,12 +15,12 @@ "swagger-ui-react": "^5.17.12" }, "devDependencies": { - "@types/react": "^18.3.2", + "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.10.0", "@typescript-eslint/parser": "^7.10.0", - "@vitejs/plugin-react-swc": "^3.6.0", + "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", @@ -2110,14 +2110,14 @@ } }, "node_modules/@swc/core": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.4.17.tgz", - "integrity": "sha512-tq+mdWvodMBNBBZbwFIMTVGYHe9N7zvEaycVVjfvAx20k1XozHbHhRv+9pEVFJjwRxLdXmtvFZd3QZHRAOpoNQ==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.5.7.tgz", + "integrity": "sha512-U4qJRBefIJNJDRCCiVtkfa/hpiZ7w0R6kASea+/KLp+vkus3zcLSB8Ub8SvKgTIxjWpwsKcZlPf5nrv4ls46SQ==", "dev": true, "hasInstallScript": true, "dependencies": { "@swc/counter": "^0.1.2", - "@swc/types": "^0.1.5" + "@swc/types": "0.1.7" }, "engines": { "node": ">=10" @@ -2127,16 +2127,16 @@ "url": "https://opencollective.com/swc" }, "optionalDependencies": { - "@swc/core-darwin-arm64": "1.4.17", - "@swc/core-darwin-x64": "1.4.17", - "@swc/core-linux-arm-gnueabihf": "1.4.17", - 
"@swc/core-linux-arm64-gnu": "1.4.17", - "@swc/core-linux-arm64-musl": "1.4.17", - "@swc/core-linux-x64-gnu": "1.4.17", - "@swc/core-linux-x64-musl": "1.4.17", - "@swc/core-win32-arm64-msvc": "1.4.17", - "@swc/core-win32-ia32-msvc": "1.4.17", - "@swc/core-win32-x64-msvc": "1.4.17" + "@swc/core-darwin-arm64": "1.5.7", + "@swc/core-darwin-x64": "1.5.7", + "@swc/core-linux-arm-gnueabihf": "1.5.7", + "@swc/core-linux-arm64-gnu": "1.5.7", + "@swc/core-linux-arm64-musl": "1.5.7", + "@swc/core-linux-x64-gnu": "1.5.7", + "@swc/core-linux-x64-musl": "1.5.7", + "@swc/core-win32-arm64-msvc": "1.5.7", + "@swc/core-win32-ia32-msvc": "1.5.7", + "@swc/core-win32-x64-msvc": "1.5.7" }, "peerDependencies": { "@swc/helpers": "^0.5.0" @@ -2148,9 +2148,9 @@ } }, "node_modules/@swc/core-darwin-arm64": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.4.17.tgz", - "integrity": "sha512-HVl+W4LezoqHBAYg2JCqR+s9ife9yPfgWSj37iIawLWzOmuuJ7jVdIB7Ee2B75bEisSEKyxRlTl6Y1Oq3owBgw==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.5.7.tgz", + "integrity": "sha512-bZLVHPTpH3h6yhwVl395k0Mtx8v6CGhq5r4KQdAoPbADU974Mauz1b6ViHAJ74O0IVE5vyy7tD3OpkQxL/vMDQ==", "cpu": [ "arm64" ], @@ -2164,9 +2164,9 @@ } }, "node_modules/@swc/core-darwin-x64": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.4.17.tgz", - "integrity": "sha512-WYRO9Fdzq4S/he8zjW5I95G1zcvyd9yyD3Tgi4/ic84P5XDlSMpBDpBLbr/dCPjmSg7aUXxNQqKqGkl6dQxYlA==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.5.7.tgz", + "integrity": "sha512-RpUyu2GsviwTc2qVajPL0l8nf2vKj5wzO3WkLSHAHEJbiUZk83NJrZd1RVbEknIMO7+Uyjh54hEh8R26jSByaw==", "cpu": [ "x64" ], @@ -2180,9 +2180,9 @@ } }, "node_modules/@swc/core-linux-arm-gnueabihf": { - "version": "1.4.17", - "resolved": 
"https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.4.17.tgz", - "integrity": "sha512-cgbvpWOvtMH0XFjvwppUCR+Y+nf6QPaGu6AQ5hqCP+5Lv2zO5PG0RfasC4zBIjF53xgwEaaWmGP5/361P30X8Q==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.5.7.tgz", + "integrity": "sha512-cTZWTnCXLABOuvWiv6nQQM0hP6ZWEkzdgDvztgHI/+u/MvtzJBN5lBQ2lue/9sSFYLMqzqff5EHKlFtrJCA9dQ==", "cpu": [ "arm" ], @@ -2196,9 +2196,9 @@ } }, "node_modules/@swc/core-linux-arm64-gnu": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.4.17.tgz", - "integrity": "sha512-l7zHgaIY24cF9dyQ/FOWbmZDsEj2a9gRFbmgx2u19e3FzOPuOnaopFj0fRYXXKCmtdx+anD750iBIYnTR+pq/Q==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.5.7.tgz", + "integrity": "sha512-hoeTJFBiE/IJP30Be7djWF8Q5KVgkbDtjySmvYLg9P94bHg9TJPSQoC72tXx/oXOgXvElDe/GMybru0UxhKx4g==", "cpu": [ "arm64" ], @@ -2212,9 +2212,9 @@ } }, "node_modules/@swc/core-linux-arm64-musl": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.4.17.tgz", - "integrity": "sha512-qhH4gr9gAlVk8MBtzXbzTP3BJyqbAfUOATGkyUtohh85fPXQYuzVlbExix3FZXTwFHNidGHY8C+ocscI7uDaYw==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.5.7.tgz", + "integrity": "sha512-+NDhK+IFTiVK1/o7EXdCeF2hEzCiaRSrb9zD7X2Z7inwWlxAntcSuzZW7Y6BRqGQH89KA91qYgwbnjgTQ22PiQ==", "cpu": [ "arm64" ], @@ -2228,9 +2228,9 @@ } }, "node_modules/@swc/core-linux-x64-gnu": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.4.17.tgz", - "integrity": "sha512-vRDFATL1oN5oZMImkwbgSHEkp8xG1ofEASBypze01W1Tqto8t+yo6gsp69wzCZBlxldsvPpvFZW55Jq0Rn+UnA==", + "version": "1.5.7", + "resolved": 
"https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.5.7.tgz", + "integrity": "sha512-25GXpJmeFxKB+7pbY7YQLhWWjkYlR+kHz5I3j9WRl3Lp4v4UD67OGXwPe+DIcHqcouA1fhLhsgHJWtsaNOMBNg==", "cpu": [ "x64" ], @@ -2244,9 +2244,9 @@ } }, "node_modules/@swc/core-linux-x64-musl": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.4.17.tgz", - "integrity": "sha512-zQNPXAXn3nmPqv54JVEN8k2JMEcMTQ6veVuU0p5O+A7KscJq+AGle/7ZQXzpXSfUCXlLMX4wvd+rwfGhh3J4cw==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.5.7.tgz", + "integrity": "sha512-0VN9Y5EAPBESmSPPsCJzplZHV26akC0sIgd3Hc/7S/1GkSMoeuVL+V9vt+F/cCuzr4VidzSkqftdP3qEIsXSpg==", "cpu": [ "x64" ], @@ -2260,9 +2260,9 @@ } }, "node_modules/@swc/core-win32-arm64-msvc": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.4.17.tgz", - "integrity": "sha512-z86n7EhOwyzxwm+DLE5NoLkxCTme2lq7QZlDjbQyfCxOt6isWz8rkW5QowTX8w9Rdmk34ncrjSLvnHOeLY17+w==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.5.7.tgz", + "integrity": "sha512-RtoNnstBwy5VloNCvmvYNApkTmuCe4sNcoYWpmY7C1+bPR+6SOo8im1G6/FpNem8AR5fcZCmXHWQ+EUmRWJyuA==", "cpu": [ "arm64" ], @@ -2276,9 +2276,9 @@ } }, "node_modules/@swc/core-win32-ia32-msvc": { - "version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.4.17.tgz", - "integrity": "sha512-JBwuSTJIgiJJX6wtr4wmXbfvOswHFj223AumUrK544QV69k60FJ9q2adPW9Csk+a8wm1hLxq4HKa2K334UHJ/g==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.5.7.tgz", + "integrity": "sha512-Xm0TfvcmmspvQg1s4+USL3x8D+YPAfX2JHygvxAnCJ0EHun8cm2zvfNBcsTlnwYb0ybFWXXY129aq1wgFC9TpQ==", "cpu": [ "ia32" ], @@ -2292,9 +2292,9 @@ } }, "node_modules/@swc/core-win32-x64-msvc": { - 
"version": "1.4.17", - "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.4.17.tgz", - "integrity": "sha512-jFkOnGQamtVDBm3MF5Kq1lgW8vx4Rm1UvJWRUfg+0gx7Uc3Jp3QMFeMNw/rDNQYRDYPG3yunCC+2463ycd5+dg==", + "version": "1.5.7", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.5.7.tgz", + "integrity": "sha512-tp43WfJLCsKLQKBmjmY/0vv1slVywR5Q4qKjF5OIY8QijaEW7/8VwPyUyVoJZEnDgv9jKtUTG5PzqtIYPZGnyg==", "cpu": [ "x64" ], @@ -2314,9 +2314,9 @@ "dev": true }, "node_modules/@swc/types": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.6.tgz", - "integrity": "sha512-/JLo/l2JsT/LRd80C3HfbmVpxOAJ11FO2RCEslFrgzLltoP9j8XIbsyDcfCt2WWyX+CM96rBoNM+IToAkFOugg==", + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.7.tgz", + "integrity": "sha512-scHWahbHF0eyj3JsxG9CFJgFdFNaVQCNAimBlT6PzS3n/HptxqREjsm4OH6AN3lYcffZYSPxXW8ua2BEHp0lJQ==", "dev": true, "dependencies": { "@swc/counter": "^0.1.3" @@ -2383,9 +2383,9 @@ } }, "node_modules/@types/react": { - "version": "18.3.2", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.2.tgz", - "integrity": "sha512-Btgg89dAnqD4vV7R3hlwOxgqobUQKgx3MmrQRi0yYbs/P0ym8XozIAlkqVilPqHQwXs4e9Tf63rrCgl58BcO4w==", + "version": "18.3.3", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.3.tgz", + "integrity": "sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==", "devOptional": true, "dependencies": { "@types/prop-types": "*", @@ -2620,12 +2620,12 @@ "dev": true }, "node_modules/@vitejs/plugin-react-swc": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.6.0.tgz", - "integrity": "sha512-XFRbsGgpGxGzEV5i5+vRiro1bwcIaZDIdBRP16qwm+jP68ue/S8FJTBEgOeojtVDYrbSua3XFp71kC8VJE6v+g==", + "version": "3.7.0", + "resolved": 
"https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.7.0.tgz", + "integrity": "sha512-yrknSb3Dci6svCd/qhHqhFPDSw0QtjumcqdKMoNNzmOl5lMXTTiqzjWtG4Qask2HdvvzaNgSunbQGet8/GrKdA==", "dev": true, "dependencies": { - "@swc/core": "^1.3.107" + "@swc/core": "^1.5.7" }, "peerDependencies": { "vite": "^4 || ^5" diff --git a/playground/package.json b/playground/package.json index 41ba60950f..9441420f74 100644 --- a/playground/package.json +++ b/playground/package.json @@ -17,12 +17,12 @@ "swagger-ui-react": "^5.17.12" }, "devDependencies": { - "@types/react": "^18.3.2", + "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.10.0", "@typescript-eslint/parser": "^7.10.0", - "@vitejs/plugin-react-swc": "^3.6.0", + "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", From 97cc96cb8ce9bf5da66785733ae1999d73527309 Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Mon, 3 Jun 2024 10:40:23 -0400 Subject: [PATCH 25/78] ci(i): Update codecov to v4 (#2669) ## Relevant issue(s) Resolves #2668 ## Description I noticed that someone on the codecov repo mentioned that v4 fixed their issue https://github.com/codecov/codecov-action/issues/598#issuecomment-2063762920. 
Trying it :) --- .github/workflows/test-and-upload-coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index 0541b9a331..d613f32f98 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -135,7 +135,7 @@ jobs: path: coverage_reports - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} name: defradb-codecov From 69be4a518f4319907a40dbe5200de3fa061ca4bb Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Mon, 3 Jun 2024 12:08:35 -0400 Subject: [PATCH 26/78] test(i): Add txn conflict documentation (#2672) ## Relevant issue(s) Resolves #2671 ## Description We noticed that our in-memory store and badger behave a little differently on transaction conflicts. This PR simply documents this behaviour difference with added unit tests. The next step will be to decide if our in-memory store should be modified to behave the same way. 
--- datastore/txn_test.go | 268 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 268 insertions(+) diff --git a/datastore/txn_test.go b/datastore/txn_test.go index f5170146d6..95c2cf7ef0 100644 --- a/datastore/txn_test.go +++ b/datastore/txn_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" + "github.com/sourcenetwork/defradb/datastore/memory" ) func TestNewTxnFrom(t *testing.T) { @@ -120,3 +121,270 @@ func TestShimTxnStoreClose(t *testing.T) { err = shimTxn.Close() require.NoError(t, err) } + +// This test documents https://github.com/sourcenetwork/defradb/issues/2673 +func TestMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + rootstore := memory.NewDatastore(ctx) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + require.ErrorIs(t, err, badger.ErrConflict) +} + +func TestMemoryStoreTxn_TwoTransactionsWithGetPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + rootstore := memory.NewDatastore(ctx) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + _, err = txn1.Get(ctx, ds.NewKey("key")) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 
first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + require.ErrorIs(t, err, badger.ErrConflict) +} + +func TestMemoryStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + rootstore := memory.NewDatastore(ctx) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + _, err = txn1.Has(ctx, ds.NewKey("key")) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + require.ErrorIs(t, err, badger.ErrConflict) +} + +// This test documents https://github.com/sourcenetwork/defradb/issues/2673 +func TestBadgerMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + // We are expecting this to fail because of the conflict but badger does not return an error. 
+ // Conflicts in badger only occurs when the value of a key was changed between the time you read and you rewrite it. + // require.ErrorIs(t, err, badger.ErrConflict) + require.NoError(t, err) +} + +func TestBadgerMemoryStoreTxn_TwoTransactionsWithGetPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + _, err = txn1.Get(ctx, ds.NewKey("key")) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + require.ErrorIs(t, err, badger.ErrConflict) +} + +func TestBadgerMemoryStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + _, err = txn1.Has(ctx, ds.NewKey("key")) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + 
err = txn1.Commit(ctx) + require.ErrorIs(t, err, badger.ErrConflict) +} + +// This test documents https://github.com/sourcenetwork/defradb/issues/2673 +func TestBadgerFileStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("")} + rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts) + require.NoError(t, err) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + // We are expecting this to fail because of the conflict but badger does not return an error. + // Conflicts in badger only occurs when the value of a key was changed between the time you read and you rewrite it. 
+ // require.ErrorIs(t, err, badger.ErrConflict) + require.NoError(t, err) +} + +func TestBadgerFileStoreTxn_TwoTransactionsWithGetPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("")} + rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts) + require.NoError(t, err) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + _, err = txn1.Get(ctx, ds.NewKey("key")) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + require.ErrorIs(t, err, badger.ErrConflict) +} + +func TestBadgerFileStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithConflict(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("")} + rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts) + require.NoError(t, err) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + _, err = txn1.Has(ctx, ds.NewKey("key")) + require.NoError(t, err) + + err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value")) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("key"), []byte("value")) + require.NoError(t, err) + + // Commit txn2 first to create a conflict + err = txn2.Commit(ctx) + require.NoError(t, err) + + err = txn1.Commit(ctx) + require.ErrorIs(t, err, badger.ErrConflict) +} From 4b3800e451a0f23cf05be9a56a5f012e503af574 Mon Sep 17 00:00:00 2001 
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 3 Jun 2024 16:04:06 -0400 Subject: [PATCH 27/78] bot: Update dependencies (bulk dependabot PRs) 03-06-2024 (#2674) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by combining the following PRs: #2667 bot: Bump swagger-ui-react from 5.17.12 to 5.17.14 in /playground #2666 bot: Bump vite from 5.2.11 to 5.2.12 in /playground #2665 bot: Bump graphiql from 3.2.2 to 3.2.3 in /playground #2664 bot: Bump github.com/spf13/viper from 1.18.2 to 1.19.0 #2663 bot: Bump @typescript-eslint/parser from 7.10.0 to 7.11.0 in /playground #2651 bot: Bump go.opentelemetry.io/otel/sdk/metric from 1.26.0 to 1.27.0 ⚠️ The following PRs were resolved manually due to merge conflicts: #2661 bot: Bump @typescript-eslint/eslint-plugin from 7.10.0 to 7.11.0 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- go.mod | 4 +- go.sum | 8 +- playground/package-lock.json | 423 ++++++++++++++++++++++++++++------- playground/package.json | 10 +- 4 files changed, 350 insertions(+), 95 deletions(-) diff --git a/go.mod b/go.mod index f3935f7ce9..5e47eeadf2 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/sourcenetwork/sourcehub v0.2.1-0.20240305165631-9b75b1000724 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.18.2 + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/tidwall/btree v1.7.0 github.com/valyala/fastjson v1.6.4 @@ -245,7 +245,7 @@ require ( github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - 
github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 // indirect github.com/piprate/json-gold v0.5.0 // indirect diff --git a/go.sum b/go.sum index 04c0313b04..64f0b7b56b 100644 --- a/go.sum +++ b/go.sum @@ -924,8 +924,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= @@ -1104,8 +1104,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 
h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= diff --git a/playground/package-lock.json b/playground/package-lock.json index b8d7b737cb..4a52be6ace 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -8,24 +8,24 @@ "name": "playground", "version": "0.0.0", "dependencies": { - "graphiql": "^3.2.2", + "graphiql": "^3.2.3", "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.12" + "swagger-ui-react": "^5.17.14" }, "devDependencies": { "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.10.0", - "@typescript-eslint/parser": "^7.10.0", + "@typescript-eslint/eslint-plugin": "^7.11.0", + "@typescript-eslint/parser": "^7.11.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.11" + "vite": "^5.2.12" } }, "node_modules/@babel/runtime": { @@ -549,9 +549,9 @@ } }, "node_modules/@floating-ui/core": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.1.tgz", - "integrity": "sha512-42UH54oPZHPdRHdw6BgoBD6cg/eVTmVrFcgeRDM3jbO7uxSoipVcmcIGFcA5jmOHO5apcyvBhkSKES3fQJnu7A==", + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.2.tgz", + "integrity": "sha512-+2XpQV9LLZeanU4ZevzRnGFg2neDeKHgFLjP6YLW+tly0IvrhqT4u8enLGjLH3qeh85g19xY5rsAusfwTdn5lg==", "dependencies": { "@floating-ui/utils": 
"^0.2.0" } @@ -566,9 +566,9 @@ } }, "node_modules/@floating-ui/react-dom": { - "version": "2.0.9", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.9.tgz", - "integrity": "sha512-q0umO0+LQK4+p6aGyvzASqKbKOJcAHJ7ycE9CuUvfx3s9zTHWmGJTPOIlM/hmSBfUfg/XfY5YhLBLR/LHwShQQ==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.0.tgz", + "integrity": "sha512-lNzj5EQmEKn5FFKc04+zasr09h/uX8RtJRNj5gUXsSQIXHVWTVh+hVAg1vOMCexkX8EgvemMvIFpQfkosnVNyA==", "dependencies": { "@floating-ui/dom": "^1.0.0" }, @@ -583,9 +583,9 @@ "integrity": "sha512-J4yDIIthosAsRZ5CPYP/jQvUAQtlZTTD/4suA08/FEnlxqW3sKS9iAhgsa9VYLZ6vDHn/ixJgIqRQPotoBjxIw==" }, "node_modules/@graphiql/react": { - "version": "0.22.1", - "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.22.1.tgz", - "integrity": "sha512-PBClhO2juCvVvmE5qD4PHivJLkhp0dqIX1zgId8Z83UCKpxO2M+bEspRL9aOQQaE4F4xqExCUk5B2AL+wc+agg==", + "version": "0.22.2", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.22.2.tgz", + "integrity": "sha512-46UV7CBQdZ0iU537uOkOU6HOOs7P1o7vQpFSUezB4VRem0Y3I4TDaYQADCOo7gFlwBs5Vb9YOup8r7cmXGIr7A==", "dependencies": { "@graphiql/toolkit": "^0.9.1", "@headlessui/react": "^1.7.15", @@ -596,10 +596,10 @@ "@types/codemirror": "^5.60.8", "clsx": "^1.2.1", "codemirror": "^5.65.3", - "codemirror-graphql": "^2.0.11", + "codemirror-graphql": "^2.0.12", "copy-to-clipboard": "^3.2.0", "framer-motion": "^6.5.1", - "graphql-language-service": "^5.2.0", + "graphql-language-service": "^5.2.1", "markdown-it": "^14.1.0", "set-value": "^4.1.0" }, @@ -714,22 +714,22 @@ } }, "node_modules/@lezer/lr": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.0.tgz", - "integrity": "sha512-Wst46p51km8gH0ZUmeNrtpRYmdlRHUpN1DQd3GFAyKANi8WVz8c2jHYTf1CVScFaCjQw1iO3ZZdqGDxQPRErTg==", + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz", + "integrity": 
"sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==", "peer": true, "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@motionone/animation": { - "version": "10.17.0", - "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.17.0.tgz", - "integrity": "sha512-ANfIN9+iq1kGgsZxs+Nz96uiNcPLGTXwfNo2Xz/fcJXniPYpaz/Uyrfa+7I5BPLxCP82sh7quVDudf1GABqHbg==", + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.18.0.tgz", + "integrity": "sha512-9z2p5GFGCm0gBsZbi8rVMOAJCtw1WqBTIPw3ozk06gDvZInBPIsQcHgYogEJ4yuHJ+akuW8g1SEIOpTOvYs8hw==", "dependencies": { - "@motionone/easing": "^10.17.0", - "@motionone/types": "^10.17.0", - "@motionone/utils": "^10.17.0", + "@motionone/easing": "^10.18.0", + "@motionone/types": "^10.17.1", + "@motionone/utils": "^10.18.0", "tslib": "^2.3.1" } }, @@ -747,35 +747,35 @@ } }, "node_modules/@motionone/easing": { - "version": "10.17.0", - "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.17.0.tgz", - "integrity": "sha512-Bxe2wSuLu/qxqW4rBFS5m9tMLOw+QBh8v5A7Z5k4Ul4sTj5jAOfZG5R0bn5ywmk+Fs92Ij1feZ5pmC4TeXA8Tg==", + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.18.0.tgz", + "integrity": "sha512-VcjByo7XpdLS4o9T8t99JtgxkdMcNWD3yHU/n6CLEz3bkmKDRZyYQ/wmSf6daum8ZXqfUAgFeCZSpJZIMxaCzg==", "dependencies": { - "@motionone/utils": "^10.17.0", + "@motionone/utils": "^10.18.0", "tslib": "^2.3.1" } }, "node_modules/@motionone/generators": { - "version": "10.17.0", - "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.17.0.tgz", - "integrity": "sha512-T6Uo5bDHrZWhIfxG/2Aut7qyWQyJIWehk6OB4qNvr/jwA/SRmixwbd7SOrxZi1z5rH3LIeFFBKK1xHnSbGPZSQ==", + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.18.0.tgz", + "integrity": 
"sha512-+qfkC2DtkDj4tHPu+AFKVfR/C30O1vYdvsGYaR13W/1cczPrrcjdvYCj0VLFuRMN+lP1xvpNZHCRNM4fBzn1jg==", "dependencies": { - "@motionone/types": "^10.17.0", - "@motionone/utils": "^10.17.0", + "@motionone/types": "^10.17.1", + "@motionone/utils": "^10.18.0", "tslib": "^2.3.1" } }, "node_modules/@motionone/types": { - "version": "10.17.0", - "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.17.0.tgz", - "integrity": "sha512-EgeeqOZVdRUTEHq95Z3t8Rsirc7chN5xFAPMYFobx8TPubkEfRSm5xihmMUkbaR2ErKJTUw3347QDPTHIW12IA==" + "version": "10.17.1", + "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.17.1.tgz", + "integrity": "sha512-KaC4kgiODDz8hswCrS0btrVrzyU2CSQKO7Ps90ibBVSQmjkrt2teqta6/sOG59v7+dPnKMAg13jyqtMKV2yJ7A==" }, "node_modules/@motionone/utils": { - "version": "10.17.0", - "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.17.0.tgz", - "integrity": "sha512-bGwrki4896apMWIj9yp5rAS2m0xyhxblg6gTB/leWDPt+pb410W8lYWsxyurX+DH+gO1zsQsfx2su/c1/LtTpg==", + "version": "10.18.0", + "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.18.0.tgz", + "integrity": "sha512-3XVF7sgyTSI2KWvTf6uLlBJ5iAgRgmvp3bpuOiQJvInd4nZ19ET8lX5unn30SlmRH7hXbBbH+Gxd0m0klJ3Xtw==", "dependencies": { - "@motionone/types": "^10.17.0", + "@motionone/types": "^10.17.1", "hey-listen": "^1.0.8", "tslib": "^2.3.1" } @@ -2429,16 +2429,16 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.10.0.tgz", - "integrity": "sha512-PzCr+a/KAef5ZawX7nbyNwBDtM1HdLIT53aSA2DDlxmxMngZ43O8SIePOeX8H5S+FHXeI6t97mTt/dDdzY4Fyw==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.11.0.tgz", + "integrity": 
"sha512-P+qEahbgeHW4JQ/87FuItjBj8O3MYv5gELDzr8QaQ7fsll1gSMTYb6j87MYyxwf3DtD7uGFB9ShwgmCJB5KmaQ==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.10.0", - "@typescript-eslint/type-utils": "7.10.0", - "@typescript-eslint/utils": "7.10.0", - "@typescript-eslint/visitor-keys": "7.10.0", + "@typescript-eslint/scope-manager": "7.11.0", + "@typescript-eslint/type-utils": "7.11.0", + "@typescript-eslint/utils": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2461,16 +2461,63 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.11.0.tgz", + "integrity": "sha512-27tGdVEiutD4POirLZX4YzT180vevUURJl4wJGmm6TrQoiYwuxTIY98PBp6L2oN+JQxzE0URvYlzJaBHIekXAw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", + "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", + "integrity": 
"sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/parser": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.10.0.tgz", - "integrity": "sha512-2EjZMA0LUW5V5tGQiaa2Gys+nKdfrn2xiTIBLR4fxmPmVSvgPcKNW+AE/ln9k0A4zDUti0J/GZXMDupQoI+e1w==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.11.0.tgz", + "integrity": "sha512-yimw99teuaXVWsBcPO1Ais02kwJ1jmNA1KxE7ng0aT7ndr1pT1wqj0OJnsYVGKKlc4QJai86l/025L6z8CljOg==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "7.10.0", - "@typescript-eslint/types": "7.10.0", - "@typescript-eslint/typescript-estree": "7.10.0", - "@typescript-eslint/visitor-keys": "7.10.0", + "@typescript-eslint/scope-manager": "7.11.0", + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/typescript-estree": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0", "debug": "^4.3.4" }, "engines": { @@ -2489,6 +2536,81 @@ } } }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.11.0.tgz", + "integrity": "sha512-27tGdVEiutD4POirLZX4YzT180vevUURJl4wJGmm6TrQoiYwuxTIY98PBp6L2oN+JQxzE0URvYlzJaBHIekXAw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + 
"node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", + "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.11.0.tgz", + "integrity": "sha512-cxkhZ2C/iyi3/6U9EPc5y+a6csqHItndvN/CzbNXTNrsC3/ASoYQZEt9uMaEp+xFNjasqQyszp5TumAVKKvJeQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", + "integrity": "sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/scope-manager": { "version": 
"7.10.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.10.0.tgz", @@ -2507,13 +2629,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.10.0.tgz", - "integrity": "sha512-D7tS4WDkJWrVkuzgm90qYw9RdgBcrWmbbRkrLA4d7Pg3w0ttVGDsvYGV19SH8gPR5L7OtcN5J1hTtyenO9xE9g==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.11.0.tgz", + "integrity": "sha512-WmppUEgYy+y1NTseNMJ6mCFxt03/7jTOy08bcg7bxJJdsM4nuhnchyBbE8vryveaJUf62noH7LodPSo5Z0WUCg==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "7.10.0", - "@typescript-eslint/utils": "7.10.0", + "@typescript-eslint/typescript-estree": "7.11.0", + "@typescript-eslint/utils": "7.11.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2533,6 +2655,64 @@ } } }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", + "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.11.0.tgz", + "integrity": "sha512-cxkhZ2C/iyi3/6U9EPc5y+a6csqHItndvN/CzbNXTNrsC3/ASoYQZEt9uMaEp+xFNjasqQyszp5TumAVKKvJeQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": 
"^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", + "integrity": "sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/types": { "version": "7.10.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.10.0.tgz", @@ -2575,15 +2755,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.10.0.tgz", - "integrity": "sha512-olzif1Fuo8R8m/qKkzJqT7qwy16CzPRWBvERS0uvyc+DHd8AKbO4Jb7kpAvVzMmZm8TrHnI7hvjN4I05zow+tg==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.11.0.tgz", + "integrity": "sha512-xlAWwPleNRHwF37AhrZurOxA1wyXowW4PqVXZVUNCLjB48CqdPJoJWkrpH2nij9Q3Lb7rtWindtoXwxjxlKKCA==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.10.0", - "@typescript-eslint/types": "7.10.0", - "@typescript-eslint/typescript-estree": "7.10.0" + "@typescript-eslint/scope-manager": "7.11.0", + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/typescript-estree": "7.11.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2596,6 +2776,81 
@@ "eslint": "^8.56.0" } }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.11.0.tgz", + "integrity": "sha512-27tGdVEiutD4POirLZX4YzT180vevUURJl4wJGmm6TrQoiYwuxTIY98PBp6L2oN+JQxzE0URvYlzJaBHIekXAw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", + "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.11.0.tgz", + "integrity": "sha512-cxkhZ2C/iyi3/6U9EPc5y+a6csqHItndvN/CzbNXTNrsC3/ASoYQZEt9uMaEp+xFNjasqQyszp5TumAVKKvJeQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "@typescript-eslint/visitor-keys": "7.11.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + 
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", + "integrity": "sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.11.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/visitor-keys": { "version": "7.10.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.10.0.tgz", @@ -2924,12 +3179,12 @@ "integrity": "sha512-br21LjYmSlVL0vFCPWPfhzUCT34FM/pAdK7rRIZwa0rrtrIdotvP4Oh4GUHsu2E3IrQMCfRkL/fN3ytMNxVQvg==" }, "node_modules/codemirror-graphql": { - "version": "2.0.11", - "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.11.tgz", - "integrity": "sha512-j1QDDXKVkpin2VsyS0ke2nAhKal6/N1UJtgnBGrPe3gj9ZSP6/K8Xytft94k0xW6giIU/JhZjvW0GwwERNzbFA==", + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.12.tgz", + "integrity": "sha512-5UCqhWzck1jClCmRewFb8aSiabnAqiaRfsvIPfmbf6WJvOb8oiefJeHilclPPiZBzY8v/Et6EBMtOeKnWCoyng==", "dependencies": { "@types/codemirror": "^0.0.90", - "graphql-language-service": "5.2.0" + "graphql-language-service": "5.2.1" }, "peerDependencies": { "@codemirror/language": "6.0.0", @@ -3825,13 +4080,13 @@ "dev": true }, "node_modules/graphiql": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.2.2.tgz", - "integrity": "sha512-Tpv9gz9/xfOCJq2RTU/ByPgCFkh3ftN16xmcJxNms3j7C0eJ9z7xg6J0lASGGJ6mTeIW9myEI98SJBPL1c4vcA==", + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.2.3.tgz", + "integrity": 
"sha512-b5XuFyTWkORhQkUZULPOPmUXocg+x7HFB53cYEjV7LcH4taB4ViGwmXqHILhfPtv+JcTN80Aw8HELVWSa16iiA==", "dependencies": { - "@graphiql/react": "^0.22.1", + "@graphiql/react": "^0.22.2", "@graphiql/toolkit": "^0.9.1", - "graphql-language-service": "^5.2.0", + "graphql-language-service": "^5.2.1", "markdown-it": "^14.1.0" }, "peerDependencies": { @@ -3849,9 +4104,9 @@ } }, "node_modules/graphql-language-service": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.2.0.tgz", - "integrity": "sha512-o/ZgTS0pBxWm3hSF4+6GwiV1//DxzoLWEbS38+jqpzzy1d/QXBidwQuVYTOksclbtOJZ3KR/tZ8fi/tI6VpVMg==", + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.2.1.tgz", + "integrity": "sha512-8ewD6otGO43vg2TiEGjoLz3CweTwfaf4ZnqfNREqZXS2JSJGXtsRBOMMknCxMfFVh4x14ql3jyDrXcyAAtbmkQ==", "dependencies": { "nullthrows": "^1.0.0", "vscode-languageserver-types": "^3.17.1" @@ -5636,9 +5891,9 @@ } }, "node_modules/swagger-ui-react": { - "version": "5.17.12", - "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.12.tgz", - "integrity": "sha512-qkDBOx9c3v1m8LyUgyc+Idz8UXLmz7RMDYX0Xlry0kwBQYxkw6AXfQ1bemgkna1sRQCvASmucdm2TYAdx6nlaA==", + "version": "5.17.14", + "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.17.14.tgz", + "integrity": "sha512-mCXerZrbcn4ftPYifUF0+iKIRTHoVCv0HcJc/sXl9nCe3oeWdsjmOWVqKabzzAkAa0NwsbKNJFv2UL/Ivnf6VQ==", "dependencies": { "@babel/runtime-corejs3": "^7.24.5", "@braintree/sanitize-url": "=7.0.2", @@ -5939,9 +6194,9 @@ "optional": true }, "node_modules/vite": { - "version": "5.2.11", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.11.tgz", - "integrity": "sha512-HndV31LWW05i1BLPMUCE1B9E9GFbOu1MbenhS58FuK6owSO5qHm7GiCotrNY1YE5rMeQSFBGmT5ZaLEjFizgiQ==", + "version": "5.2.12", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.12.tgz", + "integrity": 
"sha512-/gC8GxzxMK5ntBwb48pR32GGhENnjtY30G4A0jemunsBkiEZFw60s8InGpN8gkhHEkjnRK1aSAxeQgwvFhUHAA==", "dev": true, "dependencies": { "esbuild": "^0.20.1", diff --git a/playground/package.json b/playground/package.json index 9441420f74..68e88506c6 100644 --- a/playground/package.json +++ b/playground/package.json @@ -10,23 +10,23 @@ "preview": "vite preview" }, "dependencies": { - "graphiql": "^3.2.2", + "graphiql": "^3.2.3", "graphql": "^16.8.1", "react": "^18.3.1", "react-dom": "^18.3.1", - "swagger-ui-react": "^5.17.12" + "swagger-ui-react": "^5.17.14" }, "devDependencies": { "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.10.0", - "@typescript-eslint/parser": "^7.10.0", + "@typescript-eslint/eslint-plugin": "^7.11.0", + "@typescript-eslint/parser": "^7.11.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.11" + "vite": "^5.2.12" } } From e7be26751095789afe9891bfc4d83ed6fb86f5c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jun 2024 17:58:36 -0400 Subject: [PATCH 28/78] bot: Bump @typescript-eslint/parser from 7.11.0 to 7.12.0 in /playground (#2676) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/parser](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/parser) from 7.11.0 to 7.12.0.
Release notes

Sourced from @​typescript-eslint/parser's releases.

v7.12.0

7.12.0 (2024-06-03)

🚀 Features

  • eslint-plugin: [no-useless-template-literals] rename to no-useless-template-expression (deprecate no-useless-template-literals) (#8821)
  • eslint-plugin: [no-floating-promises] add option 'allowForKnownSafePromises' (#9186)
  • rule-tester: check for parsing errors in suggestion fixes (#9052)
  • rule-tester: port checkDuplicateTestCases from ESLint (#9026)

🩹 Fixes

  • no-useless-template-expression -> no-unnecessary-template-expression (#9174)
  • eslint-plugin: [no-unnecessary-type-assertion] combine template literal check with const variable check (#8820)
  • eslint-plugin: [dot-notation] fix false positive when accessing private/protected property with optional chaining (#8851)
  • eslint-plugin: [explicit-member-accessibility] refine report locations (#8869)
  • eslint-plugin: [no-unnecessary-type-assertion] declares are always defined, so always check declares (#8901)
  • eslint-plugin: [prefer-literal-enum-member] allow using member it self on allowBitwiseExpressions (#9114)
  • eslint-plugin: [return-await] clean up in-try-catch detection and make autofixes safe (#9031)
  • eslint-plugin: [member-ordering] also TSMethodSignature can be get/set (#9193)
  • types: correct typing ParserOptions (#9202)

❤️ Thank You

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @​typescript-eslint/parser's changelog.

7.12.0 (2024-06-03)

🩹 Fixes

  • types: correct typing ParserOptions

❤️ Thank You

  • Abraham Guo
  • Han Yeong-woo
  • Joshua Chen
  • Kim Sang Du
  • Kirk Waiblinger
  • YeonJuan

You can read about our versioning strategy and releases on our website.

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/parser&package-manager=npm_and_yarn&previous-version=7.11.0&new-version=7.12.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 125 +++++++---------------------------- playground/package.json | 2 +- 2 files changed, 26 insertions(+), 101 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 4a52be6ace..4503e9c4f3 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -19,7 +19,7 @@ "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.11.0", - "@typescript-eslint/parser": "^7.11.0", + "@typescript-eslint/parser": "^7.12.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", @@ -2509,15 +2509,15 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.11.0.tgz", - "integrity": "sha512-yimw99teuaXVWsBcPO1Ais02kwJ1jmNA1KxE7ng0aT7ndr1pT1wqj0OJnsYVGKKlc4QJai86l/025L6z8CljOg==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.12.0.tgz", + "integrity": "sha512-dm/J2UDY3oV3TKius2OUZIFHsomQmpHtsV0FTh1WO8EKgHLQ1QCADUqscPgTpU+ih1e21FQSRjXckHn3txn6kQ==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "7.11.0", - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/typescript-estree": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0", + "@typescript-eslint/scope-manager": "7.12.0", + "@typescript-eslint/types": "7.12.0", + "@typescript-eslint/typescript-estree": "7.12.0", + "@typescript-eslint/visitor-keys": "7.12.0", "debug": "^4.3.4" }, "engines": { @@ -2536,89 +2536,14 @@ } } }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.11.0.tgz", - "integrity": 
"sha512-27tGdVEiutD4POirLZX4YzT180vevUURJl4wJGmm6TrQoiYwuxTIY98PBp6L2oN+JQxzE0URvYlzJaBHIekXAw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", - "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.11.0.tgz", - "integrity": "sha512-cxkhZ2C/iyi3/6U9EPc5y+a6csqHItndvN/CzbNXTNrsC3/ASoYQZEt9uMaEp+xFNjasqQyszp5TumAVKKvJeQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", - "integrity": 
"sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/scope-manager": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.10.0.tgz", - "integrity": "sha512-7L01/K8W/VGl7noe2mgH0K7BE29Sq6KAbVmxurj8GGaPDZXPr8EEQ2seOeAS+mEV9DnzxBQB6ax6qQQ5C6P4xg==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.12.0.tgz", + "integrity": "sha512-itF1pTnN6F3unPak+kutH9raIkL3lhH1YRPGgt7QQOh43DQKVJXmWkpb+vpc/TiDHs6RSd9CTbDsc/Y+Ygq7kg==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.10.0", - "@typescript-eslint/visitor-keys": "7.10.0" + "@typescript-eslint/types": "7.12.0", + "@typescript-eslint/visitor-keys": "7.12.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2714,9 +2639,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.10.0.tgz", - "integrity": "sha512-7fNj+Ya35aNyhuqrA1E/VayQX9Elwr8NKZ4WueClR3KwJ7Xx9jcCdOrLW04h51de/+gNbyFMs+IDxh5xIwfbNg==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.12.0.tgz", + "integrity": "sha512-o+0Te6eWp2ppKY3mLCU+YA9pVJxhUJE15FV7kxuD9jgwIAa+w/ycGJBMrYDTpVGUM/tgpa9SeMOugSabWFq7bg==", "dev": true, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2727,13 +2652,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.10.0.tgz", - "integrity": 
"sha512-LXFnQJjL9XIcxeVfqmNj60YhatpRLt6UhdlFwAkjNc6jSUlK8zQOl1oktAP8PlWFzPQC1jny/8Bai3/HPuvN5g==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.12.0.tgz", + "integrity": "sha512-5bwqLsWBULv1h6pn7cMW5dXX/Y2amRqLaKqsASVwbBHMZSnHqE/HN4vT4fE0aFsiwxYvr98kqOWh1a8ZKXalCQ==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.10.0", - "@typescript-eslint/visitor-keys": "7.10.0", + "@typescript-eslint/types": "7.12.0", + "@typescript-eslint/visitor-keys": "7.12.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -2852,12 +2777,12 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.10.0.tgz", - "integrity": "sha512-9ntIVgsi6gg6FIq9xjEO4VQJvwOqA3jaBFQJ/6TK5AvEup2+cECI6Fh7QiBxmfMHXU0V0J4RyPeOU1VDNzl9cg==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.12.0.tgz", + "integrity": "sha512-uZk7DevrQLL3vSnfFl5bj4sL75qC9D6EdjemIdbtkuUmIheWpuiiylSY01JxJE7+zGrOWDZrp1WxOuDntvKrHQ==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.10.0", + "@typescript-eslint/types": "7.12.0", "eslint-visitor-keys": "^3.4.3" }, "engines": { diff --git a/playground/package.json b/playground/package.json index 68e88506c6..99c1959e86 100644 --- a/playground/package.json +++ b/playground/package.json @@ -21,7 +21,7 @@ "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.11.0", - "@typescript-eslint/parser": "^7.11.0", + "@typescript-eslint/parser": "^7.12.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", From cbb3f23ad118fc06e047d74515af488bb93b3276 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jun 2024 21:09:52 -0400 Subject: 
[PATCH 29/78] bot: Bump @typescript-eslint/eslint-plugin from 7.11.0 to 7.12.0 in /playground (#2675) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/eslint-plugin](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/eslint-plugin) from 7.11.0 to 7.12.0.
Release notes

Sourced from @​typescript-eslint/eslint-plugin's releases.

v7.12.0

7.12.0 (2024-06-03)

🚀 Features

  • eslint-plugin: [no-useless-template-literals] rename to no-useless-template-expression (deprecate no-useless-template-literals) (#8821)
  • eslint-plugin: [no-floating-promises] add option 'allowForKnownSafePromises' (#9186)
  • rule-tester: check for parsing errors in suggestion fixes (#9052)
  • rule-tester: port checkDuplicateTestCases from ESLint (#9026)

🩹 Fixes

  • no-useless-template-expression -> no-unnecessary-template-expression (#9174)
  • eslint-plugin: [no-unnecessary-type-assertion] combine template literal check with const variable check (#8820)
  • eslint-plugin: [dot-notation] fix false positive when accessing private/protected property with optional chaining (#8851)
  • eslint-plugin: [explicit-member-accessibility] refine report locations (#8869)
  • eslint-plugin: [no-unnecessary-type-assertion] declares are always defined, so always check declares (#8901)
  • eslint-plugin: [prefer-literal-enum-member] allow using member it self on allowBitwiseExpressions (#9114)
  • eslint-plugin: [return-await] clean up in-try-catch detection and make autofixes safe (#9031)
  • eslint-plugin: [member-ordering] also TSMethodSignature can be get/set (#9193)
  • types: correct typing ParserOptions (#9202)

❤️ Thank You

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @​typescript-eslint/eslint-plugin's changelog.

7.12.0 (2024-06-03)

🚀 Features

  • eslint-plugin: [no-useless-template-literals] rename to no-useless-template-expression (deprecate no-useless-template-literals)

  • rule-tester: check for parsing errors in suggestion fixes

  • rule-tester: port checkDuplicateTestCases from ESLint

  • eslint-plugin: [no-floating-promises] add option 'allowForKnownSafePromises'

🩹 Fixes

  • no-useless-template-expression -> no-unnecessary-template-expression

  • eslint-plugin: [no-unnecessary-type-assertion] combine template literal check with const variable check

  • eslint-plugin: [dot-notation] fix false positive when accessing private/protected property with optional chaining

  • eslint-plugin: [explicit-member-accessibility] refine report locations

  • eslint-plugin: [no-unnecessary-type-assertion] declares are always defined, so always check declares

  • eslint-plugin: [prefer-literal-enum-member] allow using member it self on allowBitwiseExpressions

  • eslint-plugin: [return-await] clean up in-try-catch detection and make autofixes safe

  • eslint-plugin: [member-ordering] also TSMethodSignature can be get/set

❤️ Thank You

  • Abraham Guo
  • Han Yeong-woo
  • Joshua Chen
  • Kim Sang Du
  • Kirk Waiblinger
  • YeonJuan

You can read about our versioning strategy and releases on our website.

Commits
  • 7e93b28 chore(release): publish 7.12.0
  • d0adcf1 docs: clarify what require-await does (#9200)
  • 04990d5 feat(eslint-plugin): [no-floating-promises] add option 'allowForKnownSafeProm...
  • ad85249 docs: mention related ESLint rules in no-unused-vars page (#9198)
  • e80a8d6 docs: improve description for no-dynamic-delete (#9195)
  • 9f92b30 docs: explicitly mention unbound-method limitation with thisArg (#9197)
  • 08a9448 docs: add example with PascalCase function components (#9196)
  • 5ca7f6e feat(rule-tester): port checkDuplicateTestCases from ESLint (#9026)
  • a9dd526 fix(eslint-plugin): [member-ordering] also TSMethodSignature can be get/set (...
  • 2619c3b fix(eslint-plugin): [return-await] clean up in-try-catch detection and make a...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/eslint-plugin&package-manager=npm_and_yarn&previous-version=7.11.0&new-version=7.12.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 218 +++-------------------------------- playground/package.json | 2 +- 2 files changed, 20 insertions(+), 200 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 4503e9c4f3..8e23698c2c 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -18,7 +18,7 @@ "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.11.0", + "@typescript-eslint/eslint-plugin": "^7.12.0", "@typescript-eslint/parser": "^7.12.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", @@ -2429,16 +2429,16 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.11.0.tgz", - "integrity": "sha512-P+qEahbgeHW4JQ/87FuItjBj8O3MYv5gELDzr8QaQ7fsll1gSMTYb6j87MYyxwf3DtD7uGFB9ShwgmCJB5KmaQ==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.12.0.tgz", + "integrity": "sha512-7F91fcbuDf/d3S8o21+r3ZncGIke/+eWk0EpO21LXhDfLahriZF9CGj4fbAetEjlaBdjdSm9a6VeXbpbT6Z40Q==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.11.0", - "@typescript-eslint/type-utils": "7.11.0", - "@typescript-eslint/utils": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0", + "@typescript-eslint/scope-manager": "7.12.0", + "@typescript-eslint/type-utils": "7.12.0", + "@typescript-eslint/utils": "7.12.0", + "@typescript-eslint/visitor-keys": "7.12.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2461,53 +2461,6 @@ } } }, - 
"node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.11.0.tgz", - "integrity": "sha512-27tGdVEiutD4POirLZX4YzT180vevUURJl4wJGmm6TrQoiYwuxTIY98PBp6L2oN+JQxzE0URvYlzJaBHIekXAw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", - "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", - "integrity": "sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/parser": { "version": "7.12.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.12.0.tgz", @@ -2554,13 +2507,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "7.11.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.11.0.tgz", - "integrity": "sha512-WmppUEgYy+y1NTseNMJ6mCFxt03/7jTOy08bcg7bxJJdsM4nuhnchyBbE8vryveaJUf62noH7LodPSo5Z0WUCg==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.12.0.tgz", + "integrity": "sha512-lib96tyRtMhLxwauDWUp/uW3FMhLA6D0rJ8T7HmH7x23Gk1Gwwu8UZ94NMXBvOELn6flSPiBrCKlehkiXyaqwA==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "7.11.0", - "@typescript-eslint/utils": "7.11.0", + "@typescript-eslint/typescript-estree": "7.12.0", + "@typescript-eslint/utils": "7.12.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2580,64 +2533,6 @@ } } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", - "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.11.0.tgz", - "integrity": "sha512-cxkhZ2C/iyi3/6U9EPc5y+a6csqHItndvN/CzbNXTNrsC3/ASoYQZEt9uMaEp+xFNjasqQyszp5TumAVKKvJeQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - 
"peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", - "integrity": "sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/types": { "version": "7.12.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.12.0.tgz", @@ -2680,15 +2575,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.11.0.tgz", - "integrity": "sha512-xlAWwPleNRHwF37AhrZurOxA1wyXowW4PqVXZVUNCLjB48CqdPJoJWkrpH2nij9Q3Lb7rtWindtoXwxjxlKKCA==", + "version": "7.12.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.12.0.tgz", + "integrity": "sha512-Y6hhwxwDx41HNpjuYswYp6gDbkiZ8Hin9Bf5aJQn1bpTs3afYY4GX+MPYxma8jtoIV2GRwTM/UJm/2uGCVv+DQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.11.0", - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/typescript-estree": "7.11.0" + "@typescript-eslint/scope-manager": "7.12.0", + "@typescript-eslint/types": "7.12.0", + "@typescript-eslint/typescript-estree": "7.12.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2701,81 +2596,6 @@ "eslint": "^8.56.0" } }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { - "version": "7.11.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.11.0.tgz", - "integrity": "sha512-27tGdVEiutD4POirLZX4YzT180vevUURJl4wJGmm6TrQoiYwuxTIY98PBp6L2oN+JQxzE0URvYlzJaBHIekXAw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.11.0.tgz", - "integrity": "sha512-MPEsDRZTyCiXkD4vd3zywDCifi7tatc4K37KqTprCvaXptP7Xlpdw0NR2hRJTetG5TxbWDB79Ys4kLmHliEo/w==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.11.0.tgz", - "integrity": "sha512-cxkhZ2C/iyi3/6U9EPc5y+a6csqHItndvN/CzbNXTNrsC3/ASoYQZEt9uMaEp+xFNjasqQyszp5TumAVKKvJeQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "@typescript-eslint/visitor-keys": "7.11.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.11.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.11.0.tgz", - "integrity": "sha512-7syYk4MzjxTEk0g/w3iqtgxnFQspDJfn6QKD36xMuuhTzjcxY7F8EmBLnALjVyaOF1/bVocu3bS/2/F7rXrveQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.11.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/visitor-keys": { "version": "7.12.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.12.0.tgz", diff --git a/playground/package.json b/playground/package.json index 99c1959e86..ab4b99b625 100644 --- a/playground/package.json +++ b/playground/package.json @@ -20,7 +20,7 @@ "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.11.0", + "@typescript-eslint/eslint-plugin": "^7.12.0", "@typescript-eslint/parser": "^7.12.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", From da3d057b3e1f3f84935542f2a215bd3d64372d8f Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Tue, 4 Jun 2024 11:37:15 -0400 Subject: [PATCH 30/78] test: Add relation substitute mechanic to tests (#2682) ## Relevant issue(s) Resolves #2681 ## Description Adds a relation substitute mechanic to tests, allowing us to not worry about doc ids in tests that don't need to care about the exact string. Only converts a handful of tests to the new system, I don't think we should actively spend large chunks of time migrating to the new system - we can just write new tests using this where appropriate, and passively convert existing when convenient. 
--- ...uery_with_compound_filter_relation_test.go | 64 +++++++++---------- .../one_to_many/with_alias_test.go | 37 ++++------- .../one_to_many/with_simple_test.go | 14 ++-- tests/integration/test_case.go | 34 ++++++++++ tests/integration/utils2.go | 45 ++++++++++++- 5 files changed, 124 insertions(+), 70 deletions(-) diff --git a/tests/integration/index/query_with_compound_filter_relation_test.go b/tests/integration/index/query_with_compound_filter_relation_test.go index ff503d6d38..31667d8bc7 100644 --- a/tests/integration/index/query_with_compound_filter_relation_test.go +++ b/tests/integration/index/query_with_compound_filter_relation_test.go @@ -144,17 +144,17 @@ func TestIndex_QueryWithIndexOnOneToManyRelationAndFilter_Data(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-2b020aba-0681-5896-91d6-e3224938c32e", - "name": "DefraDB" - }`, + DocMap: map[string]any{ + "name": "DefraDB", + "certificationBodyOrg": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-2b020aba-0681-5896-91d6-e3224938c32e", - "name": "LensVM" - }`, + DocMap: map[string]any{ + "name": "LensVM", + "certificationBodyOrg": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, @@ -164,10 +164,10 @@ func TestIndex_QueryWithIndexOnOneToManyRelationAndFilter_Data(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-5e7a0a2c-40a0-572c-93b6-79930cab3317", - "name": "Horizon" - }`, + DocMap: map[string]any{ + "name": "Horizon", + "certificationBodyOrg": testUtils.NewDocIndex(1, 1), + }, }, testUtils.CreateDoc{ CollectionID: 0, @@ -225,17 +225,17 @@ func TestIndex_QueryWithIndexOnOneToManyRelationOrFilter_Data(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-2b020aba-0681-5896-91d6-e3224938c32e", - "name": "DefraDB" - }`, + DocMap: map[string]any{ + "name": "DefraDB", + 
"certificationBodyOrg": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-2b020aba-0681-5896-91d6-e3224938c32e", - "name": "LensVM" - }`, + DocMap: map[string]any{ + "name": "LensVM", + "certificationBodyOrg": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, @@ -245,10 +245,10 @@ func TestIndex_QueryWithIndexOnOneToManyRelationOrFilter_Data(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-5e7a0a2c-40a0-572c-93b6-79930cab3317", - "name": "Horizon" - }`, + DocMap: map[string]any{ + "name": "Horizon", + "certificationBodyOrg": testUtils.NewDocIndex(1, 1), + }, }, testUtils.CreateDoc{ CollectionID: 0, @@ -310,10 +310,10 @@ func TestIndex_QueryWithIndexOnOneToManyRelationNotFilter_Data(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-2b020aba-0681-5896-91d6-e3224938c32e", - "name": "DefraDB" - }`, + DocMap: map[string]any{ + "name": "DefraDB", + "certificationBodyOrg": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, @@ -323,10 +323,10 @@ func TestIndex_QueryWithIndexOnOneToManyRelationNotFilter_Data(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{ - "certificationBodyOrg": "bae-5e7a0a2c-40a0-572c-93b6-79930cab3317", - "name": "Horizon" - }`, + DocMap: map[string]any{ + "name": "Horizon", + "certificationBodyOrg": testUtils.NewDocIndex(1, 1), + }, }, testUtils.CreateDoc{ CollectionID: 0, diff --git a/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go b/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go index 27ddcf0e68..43275f8404 100644 --- a/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go +++ b/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go @@ -11,7 +11,6 @@ package one_to_many import ( - "fmt" "testing" 
"github.com/sourcenetwork/immutable" @@ -122,8 +121,6 @@ func TestMutationCreateOneToMany_AliasedRelationNameInvalidIDManySide_CreatedDoc } func TestMutationCreateOneToMany_AliasedRelationNameToLinkFromManySide(t *testing.T) { - authorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - test := testUtils.TestCase{ Description: "One to many create mutation using relation id from many side, with alias.", Actions: []any{ @@ -135,13 +132,10 @@ func TestMutationCreateOneToMany_AliasedRelationNameToLinkFromManySide(t *testin }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf( - `{ - "name": "Painted House", - "author": "%s" - }`, - authorID, - ), + DocMap: map[string]any{ + "name": "Painted House", + "author": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { @@ -189,7 +183,6 @@ func TestMutationCreateOneToMany_AliasedRelationNameToLinkFromManySide(t *testin func TestMutationUpdateOneToMany_AliasRelationNameAndInternalIDBothProduceSameDocID(t *testing.T) { // These IDs MUST be shared by both tests below. 
- authorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" nonAliasedTest := testUtils.TestCase{ @@ -203,13 +196,10 @@ func TestMutationUpdateOneToMany_AliasRelationNameAndInternalIDBothProduceSameDo }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf( - `{ - "name": "Painted House", - "author_id": "%s" - }`, - authorID, - ), + DocMap: map[string]any{ + "name": "Painted House", + "author": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { @@ -241,13 +231,10 @@ func TestMutationUpdateOneToMany_AliasRelationNameAndInternalIDBothProduceSameDo }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf( - `{ - "name": "Painted House", - "author": "%s" - }`, - authorID, - ), + DocMap: map[string]any{ + "name": "Painted House", + "author": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go b/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go index 2a8b64d1b1..3d15e52323 100644 --- a/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go +++ b/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go @@ -11,7 +11,6 @@ package one_to_many import ( - "fmt" "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" @@ -95,8 +94,6 @@ func TestMutationCreateOneToMany_NonExistingRelationManySide_CreatedDoc(t *testi } func TestMutationCreateOneToMany_RelationIDToLinkFromManySide(t *testing.T) { - authorKey := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - test := testUtils.TestCase{ Description: "One to many create mutation using relation id from many side", Actions: []any{ @@ -108,13 +105,10 @@ func TestMutationCreateOneToMany_RelationIDToLinkFromManySide(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf( - `{ - "name": "Painted House", - "author_id": "%s" - }`, - authorKey, - 
), + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index bea260c773..9b30dd5e35 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -225,8 +225,15 @@ type CreateDoc struct { CollectionID int // The document to create, in JSON string format. + // + // If [DocMap] is provided this value will be ignored. Doc string + // The document to create, in map format. + // + // If this is provided [Doc] will be ignored. + DocMap map[string]any + // Any error expected from the action. Optional. // // String can be a partial, and the test will pass if an error is returned that @@ -234,6 +241,33 @@ type CreateDoc struct { ExpectedError string } +// DocIndex represents a relation field value, it allows relation fields to be set without worrying +// about the specific document id. +// +// The test harness will substitute this struct for the document at the given index before +// performing the host action. +// +// The targeted document must have been defined in an action prior to the action that this index +// is hosted upon. +type DocIndex struct { + // CollectionIndex is the index of the collection holding the document to target. + CollectionIndex int + + // Index is the index within the target collection at which the document exists. + // + // This is dependent on the order in which test [CreateDoc] actions were defined. + Index int +} + +// NewDocIndex creates a new [DocIndex] instance allowing relation fields to be set without worrying +// about the specific document id. +func NewDocIndex(collectionIndex int, index int) DocIndex { + return DocIndex{ + CollectionIndex: collectionIndex, + Index: index, + } +} + // DeleteDoc will attempt to delete the given document in the given collection // using the collection api. 
type DeleteDoc struct { diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 708e14450b..00c47fcfc2 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -1152,6 +1152,10 @@ func createDoc( s *state, action CreateDoc, ) { + if action.DocMap != nil { + substituteRelations(s, action) + } + var mutation func(*state, CreateDoc, client.P2P, []client.Collection) (*client.Document, error) switch mutationType { @@ -1197,7 +1201,12 @@ func createDocViaColSave( collections []client.Collection, ) (*client.Document, error) { var err error - doc, err := client.NewDocFromJSON([]byte(action.Doc), collections[action.CollectionID].Definition()) + var doc *client.Document + if action.DocMap != nil { + doc, err = client.NewDocFromMap(action.DocMap, collections[action.CollectionID].Definition()) + } else { + doc, err = client.NewDocFromJSON([]byte(action.Doc), collections[action.CollectionID].Definition()) + } if err != nil { return nil, err } @@ -1217,7 +1226,12 @@ func createDocViaColCreate( collections []client.Collection, ) (*client.Document, error) { var err error - doc, err := client.NewDocFromJSON([]byte(action.Doc), collections[action.CollectionID].Definition()) + var doc *client.Document + if action.DocMap != nil { + doc, err = client.NewDocFromMap(action.DocMap, collections[action.CollectionID].Definition()) + } else { + doc, err = client.NewDocFromJSON([]byte(action.Doc), collections[action.CollectionID].Definition()) + } if err != nil { return nil, err } @@ -1237,8 +1251,14 @@ func createDocViaGQL( collections []client.Collection, ) (*client.Document, error) { collection := collections[action.CollectionID] + var err error + var input string - input, err := jsonToGQL(action.Doc) + if action.DocMap != nil { + input, err = valueToGQL(action.DocMap) + } else { + input, err = jsonToGQL(action.Doc) + } require.NoError(s.t, err) request := fmt.Sprintf( @@ -1279,6 +1299,25 @@ func createDocViaGQL( return doc, nil } +// 
substituteRelations scans the fields defined in [action.DocMap], if any are of type [DocIndex] +// it will substitute the [DocIndex] for the the corresponding document ID found in the state. +// +// If a document at that index is not found it will panic. +func substituteRelations( + s *state, + action CreateDoc, +) { + for k, v := range action.DocMap { + index, isIndex := v.(DocIndex) + if !isIndex { + continue + } + + doc := s.documents[index.CollectionIndex][index.Index] + action.DocMap[k] = doc.ID().String() + } +} + // deleteDoc deletes a document using the collection api and caches it in the // given documents slice. func deleteDoc( From 1eb1fb566eec3f290d28bb6ea35ac69e3497ec43 Mon Sep 17 00:00:00 2001 From: Islam Aliev Date: Tue, 4 Jun 2024 22:25:14 +0200 Subject: [PATCH 31/78] feat: Sec. indexes on relations (#2670) ## Relevant issue(s) Resolves #2601 #2578 #2577 ## Description Enables fetching related objects via secondary indexes. It also fixes a bug with queries that contain multiple aggregates on the same collection. 
--- client/normal_nil.go | 2 +- client/normal_value_test.go | 2 +- client/schema_field_description.go | 2 +- .../i2670-sec-index-on-relations.md | 3 + internal/db/collection_index.go | 24 +- internal/db/index.go | 2 +- internal/planner/explain.go | 16 +- internal/planner/mapper/mapper.go | 35 +- internal/planner/mapper/select.go | 5 + internal/planner/planner.go | 12 +- internal/planner/select.go | 15 + internal/planner/type_join.go | 661 +++++++++++------- .../explain/execute/with_count_test.go | 2 +- .../index/query_with_relation_filter_test.go | 301 +++++++- 14 files changed, 729 insertions(+), 353 deletions(-) create mode 100644 docs/data_format_changes/i2670-sec-index-on-relations.md diff --git a/client/normal_nil.go b/client/normal_nil.go index 7cd2df3f16..f78a0cc63e 100644 --- a/client/normal_nil.go +++ b/client/normal_nil.go @@ -30,7 +30,7 @@ func NewNormalNil(kind FieldKind) (NormalValue, error) { return NewNormalNillableFloat(immutable.None[float64]()), nil case FieldKind_NILLABLE_DATETIME: return NewNormalNillableTime(immutable.None[time.Time]()), nil - case FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_JSON: + case FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_JSON, FieldKind_DocID: return NewNormalNillableString(immutable.None[string]()), nil case FieldKind_NILLABLE_BLOB: return NewNormalNillableBytes(immutable.None[[]byte]()), nil diff --git a/client/normal_value_test.go b/client/normal_value_test.go index 33cd20c46e..73e9def5d6 100644 --- a/client/normal_value_test.go +++ b/client/normal_value_test.go @@ -1404,7 +1404,7 @@ func TestNormalValue_NewNormalNil(t *testing.T) { assert.True(t, v.IsNil()) } else { _, err := NewNormalNil(kind) - require.Error(t, err) + require.Error(t, err, "field kind: "+kind.String()) } } } diff --git a/client/schema_field_description.go b/client/schema_field_description.go index 87ee843ec8..cad233b67c 100644 --- a/client/schema_field_description.go +++ b/client/schema_field_description.go @@ -104,7 +104,7 @@ func (k 
ScalarKind) Underlying() string { } func (k ScalarKind) IsNillable() bool { - return k != FieldKind_DocID + return true } func (k ScalarKind) IsObject() bool { diff --git a/docs/data_format_changes/i2670-sec-index-on-relations.md b/docs/data_format_changes/i2670-sec-index-on-relations.md new file mode 100644 index 0000000000..4f56429166 --- /dev/null +++ b/docs/data_format_changes/i2670-sec-index-on-relations.md @@ -0,0 +1,3 @@ +# Enable secondary index on relations + +This naturally caused some explain metrics to change and change detector complain about it. \ No newline at end of file diff --git a/internal/db/collection_index.go b/internal/db/collection_index.go index 14f9a1b805..c2f02bf3bf 100644 --- a/internal/db/collection_index.go +++ b/internal/db/collection_index.go @@ -21,6 +21,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" @@ -264,7 +265,7 @@ func (c *collection) createIndex( return nil, err } - err = c.checkExistingFields(desc.Fields) + err = c.checkExistingFieldsAndAdjustRelFieldNames(desc.Fields) if err != nil { return nil, err } @@ -493,20 +494,19 @@ func (c *collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, return c.Description().Indexes, nil } -func (c *collection) checkExistingFields( +// checkExistingFieldsAndAdjustRelFieldNames checks if the fields in the index description +// exist in the collection schema. +// If a field is a relation, it will be adjusted to relation id field name, a.k.a. `field_name + _id`. 
+func (c *collection) checkExistingFieldsAndAdjustRelFieldNames( fields []client.IndexedFieldDescription, ) error { - collectionFields := c.Schema().Fields - for _, field := range fields { - found := false - for _, colField := range collectionFields { - if field.Name == colField.Name { - found = true - break - } - } + for i := range fields { + field, found := c.Schema().GetFieldByName(fields[i].Name) if !found { - return NewErrNonExistingFieldForIndex(field.Name) + return NewErrNonExistingFieldForIndex(fields[i].Name) + } + if field.Kind.IsObject() { + fields[i].Name = fields[i].Name + request.RelatedObjectID } } return nil diff --git a/internal/db/index.go b/internal/db/index.go index 71569e64db..bd11e9f94b 100644 --- a/internal/db/index.go +++ b/internal/db/index.go @@ -41,7 +41,7 @@ func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool { } switch kind { - case client.FieldKind_NILLABLE_STRING: + case client.FieldKind_NILLABLE_STRING, client.FieldKind_DocID: return canConvertIndexFieldValue[string] case client.FieldKind_NILLABLE_INT: return canConvertIndexFieldValue[int64] diff --git a/internal/planner/explain.go b/internal/planner/explain.go index 5ab2f292f8..f6d3f57209 100644 --- a/internal/planner/explain.go +++ b/internal/planner/explain.go @@ -92,8 +92,8 @@ func buildDebugExplainGraph(source planNode) (map[string]any, error) { var explainGraphBuilder = map[string]any{} // If root is not the last child then keep walking and explaining the root graph. 
- if node.root != nil { - indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.root) + if node.parentSide.plan != nil { + indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.parentSide.plan) if err != nil { return nil, err } @@ -101,8 +101,8 @@ func buildDebugExplainGraph(source planNode) (map[string]any, error) { explainGraphBuilder[joinRootLabel] = indexJoinRootExplainGraph } - if node.subType != nil { - indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.subType) + if node.childSide.plan != nil { + indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.childSide.plan) if err != nil { return nil, err } @@ -117,8 +117,8 @@ func buildDebugExplainGraph(source planNode) (map[string]any, error) { var explainGraphBuilder = map[string]any{} // If root is not the last child then keep walking and explaining the root graph. - if node.root != nil { - indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.root) + if node.parentSide.plan != nil { + indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.parentSide.plan) if err != nil { return nil, err } @@ -128,8 +128,8 @@ func buildDebugExplainGraph(source planNode) (map[string]any, error) { explainGraphBuilder[joinRootLabel] = nil } - if node.subType != nil { - indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.subType) + if node.childSide.plan != nil { + indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.childSide.plan) if err != nil { return nil, err } diff --git a/internal/planner/mapper/mapper.go b/internal/planner/mapper/mapper.go index 07ec0db8e6..be52066b54 100644 --- a/internal/planner/mapper/mapper.go +++ b/internal/planner/mapper/mapper.go @@ -413,25 +413,23 @@ func resolveAggregates( childMapping = childMapping.CloneWithoutRender() mapping.SetChildAt(index, childMapping) - if !childIsMapped { - filterDependencies, err := resolveFilterDependencies( - ctx, - store, - rootSelectType, - childCollectionName, - target.filter, - 
mapping.ChildMappings[index], - childFields, - ) - if err != nil { - return nil, err - } - childFields = append(childFields, filterDependencies...) - - // If the child was not mapped, the filter will not have been converted yet - // so we must do that now. - convertedFilter = ToFilter(target.filter.Value(), mapping.ChildMappings[index]) + filterDependencies, err := resolveFilterDependencies( + ctx, + store, + rootSelectType, + childCollectionName, + target.filter, + mapping.ChildMappings[index], + childFields, + ) + if err != nil { + return nil, err } + childFields = append(childFields, filterDependencies...) + + // If the child was not mapped, the filter will not have been converted yet + // so we must do that now. + convertedFilter = ToFilter(target.filter.Value(), mapping.ChildMappings[index]) dummyJoin := &Select{ Targetable: Targetable{ @@ -989,6 +987,7 @@ func resolveInnerFilterDependencies( return nil, err } + childSelect.SkipResolve = true newFields = append(newFields, childSelect) } diff --git a/internal/planner/mapper/select.go b/internal/planner/mapper/select.go index 8b67b60937..5d9e8d39f0 100644 --- a/internal/planner/mapper/select.go +++ b/internal/planner/mapper/select.go @@ -38,6 +38,11 @@ type Select struct { // These can include stuff such as version information, aggregates, and other // Selects. Fields []Requestable + + // SkipResolve is a flag that indicates that the fields in this Select don't need to be resolved, + // i.e. it's value doesn't need to be fetched and provided to the user. + // It is used to avoid resolving related objects if they are used only in a filter and not requested in a response. 
+ SkipResolve bool } func (s *Select) AsTargetable() (*Targetable, bool) { diff --git a/internal/planner/planner.go b/internal/planner/planner.go index 13e1e0b2e9..f7a875af70 100644 --- a/internal/planner/planner.go +++ b/internal/planner/planner.go @@ -357,18 +357,20 @@ func (p *Planner) tryOptimizeJoinDirection(node *invertibleTypeJoin, parentPlan parentPlan.selectNode.filter.Conditions, node.documentMapping, ) - slct := node.subType.(*selectTopNode).selectNode + slct := node.childSide.plan.(*selectTopNode).selectNode desc := slct.collection.Description() for subFieldName, subFieldInd := range filteredSubFields { indexes := desc.GetIndexesOnField(subFieldName) if len(indexes) > 0 && !filter.IsComplex(parentPlan.selectNode.filter) { - subInd := node.documentMapping.FirstIndexOfName(node.subTypeName) - relatedField := mapper.Field{Name: node.subTypeName, Index: subInd} + subInd := node.documentMapping.FirstIndexOfName(node.parentSide.relFieldDef.Name) + relatedField := mapper.Field{Name: node.parentSide.relFieldDef.Name, Index: subInd} fieldFilter := filter.UnwrapRelation(filter.CopyField( parentPlan.selectNode.filter, relatedField, mapper.Field{Name: subFieldName, Index: subFieldInd}, ), relatedField) + // At the moment we just take the first index, but later we want to run some kind of analysis to + // determine which index is best to use. https://github.com/sourcenetwork/defradb/issues/2680 err := node.invertJoinDirectionWithIndex(fieldFilter, indexes[0]) if err != nil { return err @@ -383,7 +385,7 @@ func (p *Planner) tryOptimizeJoinDirection(node *invertibleTypeJoin, parentPlan // expandTypeJoin does a plan graph expansion and other optimizations on invertibleTypeJoin. 
func (p *Planner) expandTypeJoin(node *invertibleTypeJoin, parentPlan *selectTopNode) error { if parentPlan.selectNode.filter == nil { - return p.expandPlan(node.subType, parentPlan) + return p.expandPlan(node.childSide.plan, parentPlan) } err := p.tryOptimizeJoinDirection(node, parentPlan) @@ -391,7 +393,7 @@ func (p *Planner) expandTypeJoin(node *invertibleTypeJoin, parentPlan *selectTop return err } - return p.expandPlan(node.subType, parentPlan) + return p.expandPlan(node.childSide.plan, parentPlan) } func (p *Planner) expandGroupNodePlan(topNodeSelect *selectTopNode) error { diff --git a/internal/planner/select.go b/internal/planner/select.go index c3e7f4dd2d..5d7e448c73 100644 --- a/internal/planner/select.go +++ b/internal/planner/select.go @@ -315,6 +315,21 @@ func findIndexByFilteringField(scanNode *scanNode) immutable.Option[client.Index return immutable.None[client.IndexDescription]() } +func findIndexByFieldName(col client.Collection, fieldName string) immutable.Option[client.IndexDescription] { + for _, field := range col.Schema().Fields { + if field.Name != fieldName { + continue + } + indexes := col.Description().GetIndexesOnField(field.Name) + if len(indexes) > 0 { + // At the moment we just take the first index, but later we want to run some kind of analysis to + // determine which index is best to use. https://github.com/sourcenetwork/defradb/issues/2680 + return immutable.Some(indexes[0]) + } + } + return immutable.None[client.IndexDescription]() +} + func (n *selectNode) initFields(selectReq *mapper.Select) ([]aggregateNode, error) { aggregates := []aggregateNode{} // loop over the sub type diff --git a/internal/planner/type_join.go b/internal/planner/type_join.go index f745e3c5cf..cd20ad3c8d 100644 --- a/internal/planner/type_join.go +++ b/internal/planner/type_join.go @@ -147,45 +147,41 @@ func (n *typeIndexJoin) simpleExplain() (map[string]any, error) { // Add the type attribute. 
simpleExplainMap[joinTypeLabel] = n.joinPlan.Kind() - switch joinType := n.joinPlan.(type) { - case *typeJoinOne: - // Add the direction attribute. - if joinType.isSecondary { - simpleExplainMap[joinDirectionLabel] = joinDirectionSecondaryLabel - } else { - simpleExplainMap[joinDirectionLabel] = joinDirectionPrimaryLabel - } - + addExplainData := func(j *invertibleTypeJoin) error { // Add the attribute(s). - simpleExplainMap[joinRootLabel] = joinType.rootName - simpleExplainMap[joinSubTypeNameLabel] = joinType.subTypeName + simpleExplainMap[joinRootLabel] = immutable.Some(j.childSide.relFieldDef.Name) + simpleExplainMap[joinSubTypeNameLabel] = j.parentSide.relFieldDef.Name - subTypeExplainGraph, err := buildSimpleExplainGraph(joinType.subType) + subTypeExplainGraph, err := buildSimpleExplainGraph(j.childSide.plan) if err != nil { - return nil, err + return err } // Add the joined (subType) type's entire explain graph. simpleExplainMap[joinSubTypeLabel] = subTypeExplainGraph + return nil + } - case *typeJoinMany: - // Add the attribute(s). - simpleExplainMap[joinRootLabel] = joinType.rootName - simpleExplainMap[joinSubTypeNameLabel] = joinType.subTypeName - - subTypeExplainGraph, err := buildSimpleExplainGraph(joinType.subType) - if err != nil { - return nil, err + var err error + switch joinType := n.joinPlan.(type) { + case *typeJoinOne: + // Add the direction attribute. + if joinType.parentSide.isPrimary() { + simpleExplainMap[joinDirectionLabel] = joinDirectionPrimaryLabel + } else { + simpleExplainMap[joinDirectionLabel] = joinDirectionSecondaryLabel } - // Add the joined (subType) type's entire explain graph. 
- simpleExplainMap[joinSubTypeLabel] = subTypeExplainGraph + err = addExplainData(&joinType.invertibleTypeJoin) + + case *typeJoinMany: + err = addExplainData(&joinType.invertibleTypeJoin) default: - return simpleExplainMap, client.NewErrUnhandledType("join plan", n.joinPlan) + err = client.NewErrUnhandledType("join plan", n.joinPlan) } - return simpleExplainMap, nil + return simpleExplainMap, err } // Explain method returns a map containing all attributes of this node that @@ -201,10 +197,10 @@ func (n *typeIndexJoin) Explain(explainType request.ExplainType) (map[string]any } var subScan *scanNode if joinMany, isJoinMany := n.joinPlan.(*typeJoinMany); isJoinMany { - subScan = getScanNode(joinMany.subType) + subScan = getScanNode(joinMany.childSide.plan) } if joinOne, isJoinOne := n.joinPlan.(*typeJoinOne); isJoinOne { - subScan = getScanNode(joinOne.subType) + subScan = getScanNode(joinOne.childSide.plan) } if subScan != nil { subScanExplain, err := subScan.Explain(explainType) @@ -228,100 +224,38 @@ type typeJoinOne struct { func (p *Planner) makeTypeJoinOne( parent *selectNode, - source planNode, - subType *mapper.Select, + sourcePlan planNode, + subSelect *mapper.Select, ) (*typeJoinOne, error) { - prepareScanNodeFilterForTypeJoin(parent, source, subType) - - selectPlan, err := p.Select(subType) + invertibleTypeJoin, err := p.newInvertableTypeJoin(parent, sourcePlan, subSelect) if err != nil { return nil, err } - - // get the correct sub field schema type (collection) - subTypeFieldDesc, ok := parent.collection.Definition().GetFieldByName(subType.Name) - if !ok { - return nil, client.NewErrFieldNotExist(subType.Name) - } - - subTypeCol, err := p.db.GetCollectionByName(p.ctx, subType.CollectionName) - if err != nil { - return nil, err - } - - subTypeField, subTypeFieldNameFound := subTypeCol.Description().GetFieldByRelation( - subTypeFieldDesc.RelationName, - parent.collection.Name().Value(), - subTypeFieldDesc.Name, - ) - if !subTypeFieldNameFound { - return 
nil, client.NewErrFieldNotExist(subTypeFieldDesc.RelationName) - } - - var secondaryFieldIndex immutable.Option[int] - if !subTypeFieldDesc.IsPrimaryRelation { - idFieldName := subTypeFieldDesc.Name + request.RelatedObjectID - secondaryFieldIndex = immutable.Some( - parent.documentMapping.FirstIndexOfName(idFieldName), - ) - } - - dir := joinDirection{ - firstNode: source, - secondNode: selectPlan, - secondaryField: immutable.Some(subTypeField.Name + request.RelatedObjectID), - primaryField: subTypeFieldDesc.Name + request.RelatedObjectID, - } - - return &typeJoinOne{ - invertibleTypeJoin: invertibleTypeJoin{ - docMapper: docMapper{parent.documentMapping}, - root: source, - subType: selectPlan, - subSelect: subType, - subSelectFieldDef: subTypeFieldDesc, - rootName: immutable.Some(subTypeField.Name), - subTypeName: subType.Name, - isSecondary: !subTypeFieldDesc.IsPrimaryRelation, - secondaryFieldIndex: secondaryFieldIndex, - secondaryFetchLimit: 1, - dir: dir, - }, - }, nil + invertibleTypeJoin.secondaryFetchLimit = 1 + return &typeJoinOne{invertibleTypeJoin: invertibleTypeJoin}, nil } func (n *typeJoinOne) Kind() string { return "typeJoinOne" } -func fetchDocsWithFieldValue(plan planNode, fieldName string, val any) ([]core.Doc, error) { - propIndex := plan.DocumentMap().FirstIndexOfName(fieldName) - setSubTypeFilterToScanNode(plan, propIndex, val) - - if err := plan.Init(); err != nil { - return nil, NewErrSubTypeInit(err) - } - - var docs []core.Doc - for { - next, err := plan.Next() - if err != nil { - return nil, err - } - if !next { - break - } - - docs = append(docs, plan.Value()) - } - - return docs, nil -} - type typeJoinMany struct { invertibleTypeJoin } +func (p *Planner) makeTypeJoinMany( + parent *selectNode, + sourcePlan planNode, + subSelect *mapper.Select, +) (*typeJoinMany, error) { + invertibleTypeJoin, err := p.newInvertableTypeJoin(parent, sourcePlan, subSelect) + if err != nil { + return nil, err + } + invertibleTypeJoin.secondaryFetchLimit = 0 
+ return &typeJoinMany{invertibleTypeJoin: invertibleTypeJoin}, nil +} + func prepareScanNodeFilterForTypeJoin( parent *selectNode, source planNode, @@ -357,83 +291,149 @@ func prepareScanNodeFilterForTypeJoin( } } -func (p *Planner) makeTypeJoinMany( +func (p *Planner) newInvertableTypeJoin( parent *selectNode, - source planNode, - subType *mapper.Select, -) (*typeJoinMany, error) { - prepareScanNodeFilterForTypeJoin(parent, source, subType) + sourcePlan planNode, + subSelect *mapper.Select, +) (invertibleTypeJoin, error) { + prepareScanNodeFilterForTypeJoin(parent, sourcePlan, subSelect) - selectPlan, err := p.Select(subType) + subSelectPlan, err := p.Select(subSelect) if err != nil { - return nil, err + return invertibleTypeJoin{}, err } - subTypeFieldDesc, ok := parent.collection.Definition().GetFieldByName(subType.Name) + parentsRelFieldDef, ok := parent.collection.Definition().GetFieldByName(subSelect.Name) if !ok { - return nil, client.NewErrFieldNotExist(subType.Name) + return invertibleTypeJoin{}, client.NewErrFieldNotExist(subSelect.Name) } - subTypeCol, err := p.db.GetCollectionByName(p.ctx, subType.CollectionName) + skipChild := false + for _, field := range parent.selectReq.Fields { + if field.GetName() == subSelect.Name { + if childSelect, ok := field.AsSelect(); ok { + if childSelect.SkipResolve { + skipChild = true + } + } + break + } + } + + subCol, err := p.db.GetCollectionByName(p.ctx, subSelect.CollectionName) if err != nil { - return nil, err + return invertibleTypeJoin{}, err } - var secondaryFieldName immutable.Option[string] - var rootName immutable.Option[string] - if subTypeFieldDesc.RelationName != "" { - rootField, rootNameFound := subTypeCol.Description().GetFieldByRelation( - subTypeFieldDesc.RelationName, - parent.collection.Name().Value(), - subTypeFieldDesc.Name, - ) - if rootNameFound { - rootName = immutable.Some(rootField.Name) - secondaryFieldName = immutable.Some(rootField.Name + request.RelatedObjectID) - } + 
childsRelFieldDesc, ok := subCol.Description().GetFieldByRelation( + parentsRelFieldDef.RelationName, + parent.collection.Name().Value(), + parentsRelFieldDef.Name, + ) + if !ok { + return invertibleTypeJoin{}, client.NewErrFieldNotExist(parentsRelFieldDef.Name) } - dir := joinDirection{ - firstNode: source, - secondNode: selectPlan, - secondaryField: secondaryFieldName, - primaryField: subTypeFieldDesc.Name + request.RelatedObjectID, - } - - return &typeJoinMany{ - invertibleTypeJoin: invertibleTypeJoin{ - docMapper: docMapper{parent.documentMapping}, - root: source, - subType: selectPlan, - subSelect: subType, - subSelectFieldDef: subTypeFieldDesc, - rootName: rootName, - isSecondary: true, - subTypeName: subType.Name, - secondaryFetchLimit: 0, - dir: dir, - }, + childsRelFieldDef, ok := subCol.Definition().GetFieldByName(childsRelFieldDesc.Name) + if !ok { + return invertibleTypeJoin{}, client.NewErrFieldNotExist(subSelect.Name) + } + + parentSide := joinSide{ + plan: sourcePlan, + relFieldDef: parentsRelFieldDef, + relFieldMapIndex: immutable.Some(subSelect.Index), + col: parent.collection, + isFirst: true, + isParent: true, + } + + ind := parent.documentMapping.IndexesByName[parentsRelFieldDef.Name+request.RelatedObjectID] + if len(ind) > 0 { + parentSide.relIDFieldMapIndex = immutable.Some(ind[0]) + } + + childSide := joinSide{ + plan: subSelectPlan, + relFieldDef: childsRelFieldDef, + col: subCol, + isFirst: false, + isParent: false, + } + + ind = subSelectPlan.DocumentMap().IndexesByName[childsRelFieldDef.Name+request.RelatedObjectID] + if len(ind) > 0 { + childSide.relIDFieldMapIndex = immutable.Some(ind[0]) + } + + return invertibleTypeJoin{ + docMapper: docMapper{parent.documentMapping}, + parentSide: parentSide, + childSide: childSide, + skipChild: skipChild, }, nil } -func (n *typeJoinMany) Kind() string { - return "typeJoinMany" +type joinSide struct { + plan planNode + relFieldDef client.FieldDefinition + relFieldMapIndex immutable.Option[int] + 
relIDFieldMapIndex immutable.Option[int] + col client.Collection + isFirst bool + isParent bool } -func fetchPrimaryDoc(node, subNode planNode, parentProp string) (bool, error) { - subDoc := subNode.Value() - ind := subNode.DocumentMap().FirstIndexOfName(parentProp) +func (s *joinSide) isPrimary() bool { + return s.relFieldDef.IsPrimaryRelation +} - docIDStr, isStr := subDoc.Fields[ind].(string) - if !isStr { - return false, nil +func (join *invertibleTypeJoin) getFirstSide() *joinSide { + if join.parentSide.isFirst { + return &join.parentSide } + return &join.childSide +} + +func (join *invertibleTypeJoin) getSecondSide() *joinSide { + if !join.parentSide.isFirst { + return &join.parentSide + } + return &join.childSide +} +func (join *invertibleTypeJoin) getPrimarySide() *joinSide { + if join.parentSide.isPrimary() { + return &join.parentSide + } + return &join.childSide +} + +func (join *invertibleTypeJoin) getSecondarySide() *joinSide { + if !join.parentSide.isPrimary() { + return &join.parentSide + } + return &join.childSide +} + +func (n *typeJoinMany) Kind() string { + return "typeJoinMany" +} + +// getForeignKey returns the docID of the related object referenced by the given relation field. +func getForeignKey(node planNode, relFieldName string) string { + ind := node.DocumentMap().FirstIndexOfName(relFieldName + request.RelatedObjectID) + docIDStr, _ := node.Value().Fields[ind].(string) + return docIDStr +} + +// fetchDocWithID fetches a document with the given docID from the given planNode. 
+func fetchDocWithID(node planNode, docID string) (bool, error) { scan := getScanNode(node) if scan == nil { return false, nil } - dsKey := base.MakeDataStoreKeyWithCollectionAndDocID(scan.col.Description(), docIDStr) + dsKey := base.MakeDataStoreKeyWithCollectionAndDocID(scan.col.Description(), docID) spans := core.NewSpans(core.NewSpan(dsKey, dsKey.PrefixEnd())) @@ -452,108 +452,206 @@ func fetchPrimaryDoc(node, subNode planNode, parentProp string) (bool, error) { return true, nil } -type joinDirection struct { - firstNode planNode - secondNode planNode - secondaryField immutable.Option[string] - primaryField string - isInverted bool -} - -func (dir *joinDirection) invert() { - if !dir.secondaryField.HasValue() { - // If the secondary field has no value it cannot be inverted - return - } - dir.isInverted = !dir.isInverted - dir.firstNode, dir.secondNode = dir.secondNode, dir.firstNode - dir.secondaryField, dir.primaryField = immutable.Some(dir.primaryField), dir.secondaryField.Value() -} - type invertibleTypeJoin struct { docMapper - root planNode - subType planNode - rootName immutable.Option[string] - subTypeName string + skipChild bool - subSelect *mapper.Select - subSelectFieldDef client.FieldDefinition + parentSide joinSide + childSide joinSide - isSecondary bool - secondaryFieldIndex immutable.Option[int] secondaryFetchLimit uint // docsToYield contains documents read and ready to be yielded by this node. 
- docsToYield []core.Doc - - dir joinDirection + docsToYield []core.Doc + encounteredDocIDs []string } func (join *invertibleTypeJoin) replaceRoot(node planNode) { - join.root = node - if join.dir.isInverted { - join.dir.secondNode = node - } else { - join.dir.firstNode = node - } + join.getFirstSide().plan = node } func (join *invertibleTypeJoin) Init() error { - if err := join.subType.Init(); err != nil { + if err := join.childSide.plan.Init(); err != nil { return err } - return join.root.Init() + return join.parentSide.plan.Init() } func (join *invertibleTypeJoin) Start() error { - if err := join.subType.Start(); err != nil { + if err := join.childSide.plan.Start(); err != nil { return err } - return join.root.Start() + return join.parentSide.plan.Start() } func (join *invertibleTypeJoin) Close() error { - if err := join.root.Close(); err != nil { + if err := join.parentSide.plan.Close(); err != nil { return err } - return join.subType.Close() + return join.childSide.plan.Close() } func (join *invertibleTypeJoin) Spans(spans core.Spans) { - join.root.Spans(spans) + join.parentSide.plan.Spans(spans) +} + +func (join *invertibleTypeJoin) Source() planNode { return join.parentSide.plan } + +type primaryObjectsRetriever struct { + relIDFieldDef client.FieldDefinition + primarySide *joinSide + secondarySide *joinSide + + primaryScan *scanNode + + resultPrimaryDocs []core.Doc + resultSecondaryDoc core.Doc +} + +func newPrimaryObjectsRetriever( + primarySide, secondarySide *joinSide, +) primaryObjectsRetriever { + j := primaryObjectsRetriever{ + primarySide: primarySide, + secondarySide: secondarySide, + } + return j } -func (join *invertibleTypeJoin) Source() planNode { return join.root } +func (j *primaryObjectsRetriever) retrievePrimaryDocsReferencingSecondaryDoc() error { + relIDFieldDef, ok := j.primarySide.col.Definition().GetFieldByName( + j.primarySide.relFieldDef.Name + request.RelatedObjectID) + if !ok { + return 
client.NewErrFieldNotExist(j.primarySide.relFieldDef.Name + request.RelatedObjectID) + } + + j.primaryScan = getScanNode(j.primarySide.plan) + + j.relIDFieldDef = relIDFieldDef -func (tj *invertibleTypeJoin) invert() { - tj.dir.invert() - tj.isSecondary = !tj.isSecondary + primaryDocs, err := j.retrievePrimaryDocs() + + if err != nil { + return err + } + + j.resultPrimaryDocs, j.resultSecondaryDoc = joinPrimaryDocs(primaryDocs, j.secondarySide, j.primarySide) + + return nil } -func (join *invertibleTypeJoin) processSecondResult(secondDocs []core.Doc) (any, any) { - var secondResult any - var secondIDResult any - if join.secondaryFetchLimit == 1 { - if len(secondDocs) != 0 { - secondResult = secondDocs[0] - secondIDResult = secondDocs[0].GetID() +func (j *primaryObjectsRetriever) addIDFieldToScanner() { + found := false + for i := range j.primaryScan.fields { + if j.primaryScan.fields[i].Name == j.relIDFieldDef.Name { + found = true + break } - } else { - secondResult = secondDocs - secondDocIDs := make([]string, len(secondDocs)) - for i, doc := range secondDocs { - secondDocIDs[i] = doc.GetID() + } + if !found { + j.primaryScan.fields = append(j.primaryScan.fields, j.relIDFieldDef) + } +} + +func (j *primaryObjectsRetriever) collectDocs(numDocs int) ([]core.Doc, error) { + p := j.primarySide.plan + if err := p.Init(); err != nil { + return nil, NewErrSubTypeInit(err) + } + + docs := make([]core.Doc, 0, numDocs) + + for { + hasValue, err := p.Next() + + if err != nil { + return nil, err } - secondIDResult = secondDocIDs + + if !hasValue { + break + } + + docs = append(docs, p.Value()) } - join.root.Value().Fields[join.subSelect.Index] = secondResult - if join.secondaryFieldIndex.HasValue() { - join.root.Value().Fields[join.secondaryFieldIndex.Value()] = secondIDResult + + return docs, nil +} + +func (j *primaryObjectsRetriever) retrievePrimaryDocs() ([]core.Doc, error) { + j.addIDFieldToScanner() + + secondaryDoc := j.secondarySide.plan.Value() + 
addFilterOnIDField(j.primaryScan, j.primarySide.relIDFieldMapIndex.Value(), secondaryDoc.GetID()) + + oldFetcher := j.primaryScan.fetcher + + indexOnRelation := findIndexByFieldName(j.primaryScan.col, j.relIDFieldDef.Name) + j.primaryScan.initFetcher(immutable.None[string](), indexOnRelation) + + docs, err := j.collectDocs(0) + if err != nil { + return nil, err } - return secondResult, secondIDResult + + err = j.primaryScan.fetcher.Close() + if err != nil { + return nil, err + } + + j.primaryScan.fetcher = oldFetcher + + return docs, nil +} + +func docsToDocIDs(docs []core.Doc) []string { + docIDs := make([]string, len(docs)) + for i, doc := range docs { + docIDs[i] = doc.GetID() + } + return docIDs +} + +func joinPrimaryDocs(primaryDocs []core.Doc, secondarySide, primarySide *joinSide) ([]core.Doc, core.Doc) { + secondaryDoc := secondarySide.plan.Value() + + if secondarySide.relFieldMapIndex.HasValue() { + if secondarySide.relFieldDef.Kind.IsArray() { + secondaryDoc.Fields[secondarySide.relFieldMapIndex.Value()] = primaryDocs + } else if len(primaryDocs) > 0 { + secondaryDoc.Fields[secondarySide.relFieldMapIndex.Value()] = primaryDocs[0] + } + } + + if secondarySide.relIDFieldMapIndex.HasValue() { + if secondarySide.relFieldDef.Kind.IsArray() { + secondaryDoc.Fields[secondarySide.relIDFieldMapIndex.Value()] = docsToDocIDs(primaryDocs) + } else if len(primaryDocs) > 0 { + secondaryDoc.Fields[secondarySide.relIDFieldMapIndex.Value()] = primaryDocs[0].GetID() + } + } + + if primarySide.relFieldMapIndex.HasValue() { + for i := range primaryDocs { + primaryDocs[i].Fields[primarySide.relFieldMapIndex.Value()] = secondaryDoc + } + } + + if primarySide.relIDFieldMapIndex.HasValue() { + for i := range primaryDocs { + primaryDocs[i].Fields[primarySide.relIDFieldMapIndex.Value()] = secondaryDoc.GetID() + } + } + + return primaryDocs, secondaryDoc +} + +func (join *invertibleTypeJoin) fetchPrimaryDocsReferencingSecondaryDoc() ([]core.Doc, core.Doc, error) { + retriever := 
newPrimaryObjectsRetriever(join.getPrimarySide(), join.getSecondarySide()) + err := retriever.retrievePrimaryDocsReferencingSecondaryDoc() + return retriever.resultPrimaryDocs, retriever.resultSecondaryDoc, err } func (join *invertibleTypeJoin) Next() (bool, error) { @@ -568,54 +666,86 @@ func (join *invertibleTypeJoin) Next() (bool, error) { } } - hasFirstValue, err := join.dir.firstNode.Next() + firstSide := join.getFirstSide() + hasFirstValue, err := firstSide.plan.Next() if err != nil || !hasFirstValue { return false, err } - firstDoc := join.dir.firstNode.Value() - - if join.isSecondary { - secondDocs, err := fetchDocsWithFieldValue( - join.dir.secondNode, - // As the join is from the secondary field, we know that [join.dir.secondaryField] must have a value - // otherwise the user would not have been able to request it. - join.dir.secondaryField.Value(), - firstDoc.GetID(), - ) + if firstSide.isPrimary() { + return join.nextJoinedSecondaryDoc() + } else { + primaryDocs, secondaryDoc, err := join.fetchPrimaryDocsReferencingSecondaryDoc() if err != nil { return false, err } - if join.dir.secondNode == join.root { - if len(secondDocs) == 0 { - return false, nil - } - for i := range secondDocs { - secondDocs[i].Fields[join.subSelect.Index] = join.subType.Value() - } - join.docsToYield = append(join.docsToYield, secondDocs...) - return true, nil + if join.parentSide.isPrimary() { + join.docsToYield = append(join.docsToYield, primaryDocs...) 
} else { - secondResult, secondIDResult := join.processSecondResult(secondDocs) - join.dir.firstNode.Value().Fields[join.subSelect.Index] = secondResult - if join.secondaryFieldIndex.HasValue() { - join.dir.firstNode.Value().Fields[join.secondaryFieldIndex.Value()] = secondIDResult - } + join.docsToYield = append(join.docsToYield, secondaryDoc) } - } else { - hasDoc, err := fetchPrimaryDoc(join.dir.secondNode, join.dir.firstNode, join.dir.primaryField) - if err != nil { - return false, err + } + + return true, nil +} + +func (join *invertibleTypeJoin) nextJoinedSecondaryDoc() (bool, error) { + firstSide := join.getFirstSide() + secondSide := join.getSecondSide() + + secondaryDocID := getForeignKey(firstSide.plan, firstSide.relFieldDef.Name) + if secondaryDocID == "" { + if firstSide.isParent { + join.docsToYield = append(join.docsToYield, firstSide.plan.Value()) + return true, nil } + return join.Next() + } - if hasDoc { - join.root.Value().Fields[join.subSelect.Index] = join.subType.Value() + if !firstSide.isParent { + for i := range join.encounteredDocIDs { + if join.encounteredDocIDs[i] == secondaryDocID { + return join.Next() + } } + join.encounteredDocIDs = append(join.encounteredDocIDs, secondaryDocID) } - join.docsToYield = append(join.docsToYield, join.root.Value()) + hasDoc, err := fetchDocWithID(secondSide.plan, secondaryDocID) + if err != nil { + return false, err + } + if !hasDoc { + if firstSide.isParent { + join.docsToYield = append(join.docsToYield, firstSide.plan.Value()) + return true, nil + } + return join.Next() + } + + if join.parentSide.relFieldDef.Kind.IsArray() { + var primaryDocs []core.Doc + var secondaryDoc core.Doc + // if child is not requested as part of the response, we just add the existing one (fetched by the secondary index + // on a filtered value) so that top select node that runs the filter again can yield it. 
+ if join.skipChild { + primaryDocs, secondaryDoc = joinPrimaryDocs([]core.Doc{firstSide.plan.Value()}, secondSide, firstSide) + } else { + primaryDocs, secondaryDoc, err = join.fetchPrimaryDocsReferencingSecondaryDoc() + if err != nil { + return false, err + } + } + secondaryDoc.Fields[join.parentSide.relFieldMapIndex.Value()] = primaryDocs + + join.docsToYield = append(join.docsToYield, secondaryDoc) + } else { + parentDoc := join.parentSide.plan.Value() + parentDoc.Fields[join.parentSide.relFieldMapIndex.Value()] = join.childSide.plan.Value() + join.docsToYield = append(join.docsToYield, parentDoc) + } return true, nil } @@ -630,26 +760,19 @@ func (join *invertibleTypeJoin) invertJoinDirectionWithIndex( fieldFilter *mapper.Filter, index client.IndexDescription, ) error { - if !join.rootName.HasValue() { - // If the root field has no value it cannot be inverted - return nil - } - if join.subSelectFieldDef.Kind.IsArray() { - // invertibleTypeJoin does not support inverting one-many relations atm - return nil - } - subScan := getScanNode(join.subType) - subScan.tryAddField(join.rootName.Value() + request.RelatedObjectID) - subScan.filter = fieldFilter - subScan.initFetcher(immutable.Option[string]{}, immutable.Some(index)) + p := join.childSide.plan + s := getScanNode(p) + s.tryAddField(join.childSide.relFieldDef.Name + request.RelatedObjectID) + s.filter = fieldFilter + s.initFetcher(immutable.Option[string]{}, immutable.Some(index)) - join.invert() + join.childSide.isFirst = join.parentSide.isFirst + join.parentSide.isFirst = !join.parentSide.isFirst return nil } -func setSubTypeFilterToScanNode(plan planNode, propIndex int, val any) { - scan := getScanNode(plan) +func addFilterOnIDField(scan *scanNode, propIndex int, val any) { if scan == nil { return } diff --git a/tests/integration/explain/execute/with_count_test.go b/tests/integration/explain/execute/with_count_test.go index 4a30b9f52a..43ff3d13df 100644 --- 
a/tests/integration/explain/execute/with_count_test.go +++ b/tests/integration/explain/execute/with_count_test.go @@ -62,7 +62,7 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { "subTypeScanNode": dataMap{ "iterations": uint64(5), "docFetches": uint64(6), - "fieldFetches": uint64(14), + "fieldFetches": uint64(6), "indexFetches": uint64(0), }, }, diff --git a/tests/integration/index/query_with_relation_filter_test.go b/tests/integration/index/query_with_relation_filter_test.go index aa49dd2623..94160a5e3c 100644 --- a/tests/integration/index/query_with_relation_filter_test.go +++ b/tests/integration/index/query_with_relation_filter_test.go @@ -17,6 +17,7 @@ import ( ) func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilter2(t *testing.T) { + // 3 users have a MacBook Pro: Islam, Shahzad, Keenan req1 := `query { User(filter: { devices: {model: {_eq: "MacBook Pro"}} @@ -24,6 +25,7 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte name } }` + // 1 user has an iPhone 10: Addo req2 := `query { User(filter: { devices: {model: {_eq: "iPhone 10"}} @@ -53,16 +55,14 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte testUtils.Request{ Request: req1, Results: []map[string]any{ - {"name": "Keenan"}, {"name": "Islam"}, {"name": "Shahzad"}, + {"name": "Keenan"}, }, }, testUtils.Request{ - Request: makeExplainQuery(req1), - // The invertable join does not support inverting one-many relations, so the index is - // not used. 
- Asserter: testUtils.NewExplainAsserter().WithFieldFetches(450).WithIndexFetches(0), + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(6).WithIndexFetches(3), }, testUtils.Request{ Request: req2, @@ -71,10 +71,8 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte }, }, testUtils.Request{ - Request: makeExplainQuery(req2), - // The invertable join does not support inverting one-many relations, so the index is - // not used. - Asserter: testUtils.NewExplainAsserter().WithFieldFetches(450).WithIndexFetches(0), + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(1), }, }, } @@ -83,6 +81,7 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte } func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + // 3 users have a MacBook Pro: Islam, Shahzad, Keenan req1 := `query { User(filter: { devices: {model: {_eq: "MacBook Pro"}} @@ -90,6 +89,7 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte name } }` + // 1 user has an iPhone 10: Addo req2 := `query { User(filter: { devices: {model: {_eq: "iPhone 10"}} @@ -119,16 +119,14 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte testUtils.Request{ Request: req1, Results: []map[string]any{ - {"name": "Keenan"}, {"name": "Islam"}, {"name": "Shahzad"}, + {"name": "Keenan"}, }, }, testUtils.Request{ - Request: makeExplainQuery(req1), - // The invertable join does not support inverting one-many relations, so the index is - // not used. 
- Asserter: testUtils.NewExplainAsserter().WithFieldFetches(450).WithIndexFetches(0), + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(6).WithIndexFetches(3), }, testUtils.Request{ Request: req2, @@ -137,10 +135,8 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte }, }, testUtils.Request{ - Request: makeExplainQuery(req2), - // The invertable join does not support inverting one-many relations, so the index is - // not used. - Asserter: testUtils.NewExplainAsserter().WithFieldFetches(450).WithIndexFetches(0), + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(1), }, }, } @@ -149,6 +145,7 @@ func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilte } func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + // 1 user lives in Munich: Islam req1 := `query { User(filter: { address: {city: {_eq: "Munich"}} @@ -156,6 +153,7 @@ func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_Sh name } }` + // 3 users live in Montreal: Shahzad, Fred, John req2 := `query { User(filter: { address: {city: {_eq: "Montreal"}} @@ -176,7 +174,7 @@ func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_Sh type Address { user: User @primary - city: String @index + city: String @index }`, }, testUtils.CreatePredefinedDocs{ @@ -210,7 +208,79 @@ func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_Sh testUtils.ExecuteTestCase(t, test) } +func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelationAndRelation_ShouldFilter(t *testing.T) { + // 1 user lives in London: Andy + req1 := `query { + User(filter: { + address: {city: {_eq: "London"}} + }) { + name + } + }` + // 3 users live in Montreal: Shahzad, Fred, John + req2 := `query { + User(filter: { + address: {city: {_eq: "Montreal"}} + }) { 
+ name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed field of primary relation in 1-1 relation", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + address: Address @primary @index + } + + type Address { + user: User + city: String @index + street: String + }`, + }, + testUtils.CreatePredefinedDocs{ + Docs: getUserDocs(), + }, + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + // we make 2 index fetches: 1. to get the only address with city == "London" + // and 2. to get the corresponding user + // then 1 field fetch to get the name of the user + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(2), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "John"}, + {"name": "Fred"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + // we make 3 index fetches to get the 3 addresses with city == "Montreal" + // and 3 more index fetches to get the corresponding users + // then 3 field fetches to get the name of each user + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(3).WithIndexFetches(6), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelation_ShouldFilter(t *testing.T) { + // 1 user lives in London: Andy req1 := `query { User(filter: { address: {city: {_eq: "London"}} @@ -218,6 +288,7 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio name } }` + // 3 users live in Montreal: Shahzad, Fred, John req2 := `query { User(filter: { address: {city: {_eq: "Montreal"}} @@ -256,7 +327,6 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio // we make 1 index fetch to get the only address with city == "London" // then we scan all 10 users to find one
with matching "address_id" // after this we fetch the name of the user - // it should be optimized after this is done https://github.com/sourcenetwork/defradb/issues/2601 Asserter: testUtils.NewExplainAsserter().WithFieldFetches(11).WithIndexFetches(1), }, testUtils.Request{ @@ -272,7 +342,6 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio // we make 3 index fetch to get the 3 address with city == "Montreal" // then we scan all 10 users to find one with matching "address_id" for each address // after this we fetch the name of each user - // it should be optimized after this is done https://github.com/sourcenetwork/defradb/issues/2601 Asserter: testUtils.NewExplainAsserter().WithFieldFetches(33).WithIndexFetches(3), }, }, @@ -282,6 +351,7 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio } func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedRelationWhileIndexedForeignField_ShouldFilter(t *testing.T) { + // 1 user lives in London: Andy req := `query { User(filter: { address: {city: {_eq: "London"}} @@ -317,7 +387,7 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedRelationWhileI }, testUtils.Request{ Request: makeExplainQuery(req), - Asserter: testUtils.NewExplainAsserter().WithFieldFetches(11).WithIndexFetches(1), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(1).WithIndexFetches(2), }, }, } @@ -500,10 +570,8 @@ func TestQueryWithIndexOnOneToMany_IfFilterOnIndexedRelation_ShouldFilterWithExp }, }, testUtils.Request{ - Request: makeExplainQuery(req), - // The invertable join does not support inverting one-many relations, so the index is - // not used. 
- Asserter: testUtils.NewExplainAsserter().WithFieldFetches(10).WithIndexFetches(0), + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(14).WithIndexFetches(2), }, }, } @@ -512,6 +580,7 @@ func TestQueryWithIndexOnOneToMany_IfFilterOnIndexedRelation_ShouldFilterWithExp } func TestQueryWithIndexOnOneToOne_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + // 1 user lives in Munich: Islam req := `query { User(filter: { address: {city: {_eq: "Munich"}} @@ -563,8 +632,8 @@ func TestQueryWithIndexOnOneToOne_IfFilterOnIndexedRelation_ShouldFilter(t *test } func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedField_ShouldFilterWithExplain(t *testing.T) { - // This query will fetch first a matching device which is secondary doc and therefore - // has a reference to the primary User doc. + // This query will fetch first a matching device which is primary doc and therefore + // has a reference to the secondary User doc. req := `query { Device(filter: { year: {_eq: 2021} @@ -633,7 +702,6 @@ func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedField_ShouldFilterWithExplai func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedRelation_ShouldFilterWithExplain(t *testing.T) { // This query will fetch first a matching user (owner) which is primary doc and therefore // has no direct reference to secondary Device docs. - // At the moment the db has to make a full scan of the Device docs to find the matching ones. // Keenan has 3 devices. 
req := `query { Device(filter: { @@ -650,11 +718,11 @@ func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedRelation_ShouldFilterWithExp type User { name: String @index devices: [Device] - } + } type Device { - model: String - owner: User + model: String + owner: User @index } `, }, @@ -671,10 +739,171 @@ func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedRelation_ShouldFilterWithExp }, testUtils.Request{ Request: makeExplainQuery(req), - // we make only 1 index fetch to get the owner by it's name - // and 44 field fetches to get 2 fields for all 22 devices in the db. - // it should be optimized after this is done https://github.com/sourcenetwork/defradb/issues/2601 - Asserter: testUtils.NewExplainAsserter().WithFieldFetches(44).WithIndexFetches(1), + // we make 1 index fetch to get the owner by it's name + // and 3 index fetches to get all 3 devices of the owner + // and 3 field fetches to get 1 'model' field for every fetched device. + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(3).WithIndexFetches(4), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToMany_IfIndexedRelationIsNil_NeNilFilterShouldUseIndex(t *testing.T) { + req := `query { + Device(filter: { + owner_id: {_ne: null} + }) { + model + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed relation field in 1-N relations", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + devices: [Device] + } + + type Device { + model: String + manufacturer: String + owner: User @index + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Chris" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "Walkman", + "manufacturer": "Sony", + "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "iPhone", + "manufacturer": "Apple", + "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" + }`, + }, + 
testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "Running Man", + "manufacturer": "Braveworld Productions" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "PlayStation 5", + "manufacturer": "Sony" + }`, + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"model": "iPhone"}, + {"model": "Walkman"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + // we make 4 index fetches to find 2 devices with owner_id != null + // and 2 field fetches to get 1 'model' field for every fetched device + // plus 2 more field fetches to get related User docs + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(4).WithIndexFetches(4), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToMany_IfIndexedRelationIsNil_EqNilFilterShouldUseIndex(t *testing.T) { + req := `query { + Device(filter: { + owner_id: {_eq: null} + }) { + model + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed relation field in 1-N relations", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + devices: [Device] + } + + type Device { + model: String + manufacturer: String + owner: User @index + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Chris" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "Walkman", + "manufacturer": "Sony", + "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "iPhone", + "manufacturer": "Apple", + "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "Running Man", + "manufacturer": "Braveworld Productions" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "model": "PlayStation 5", + "manufacturer": "Sony" + }`, + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"model": "Running 
Man"}, + {"model": "PlayStation 5"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + // we make 2 index fetches to get all 2 devices with owner_id == null + // and 2 field fetches to get 1 'model' field for every fetched device. + Asserter: testUtils.NewExplainAsserter().WithFieldFetches(2).WithIndexFetches(2), }, }, } From 7dc07e74b13e8b6a1ca7c1a93c27f4cab46c62ab Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 4 Jun 2024 19:17:54 -0400 Subject: [PATCH 32/78] docs: Add http/openapi documentation & ci workflow (#2678) ## Relevant issue(s) Related #510 Resolve #2677 ## Description - Detect OpenAPI / HTTP documentation is always up to date. - Generate open-api docs in the appropriate dir. ## How has this been tested? - using `act` tool - manually introducing a change and seeing the action fail: https://github.com/sourcenetwork/defradb/actions/runs/9359749777/job/25763961265?pr=2678 Specify the platform(s) on which this was tested: - WSL2 instance --- .github/workflows/check-cli-documentation.yml | 9 - .../workflows/check-http-documentation.yml | 52 + Makefile | 5 + docs/website/references/http/openapi.json | 2094 +++++++++++++++++ 4 files changed, 2151 insertions(+), 9 deletions(-) create mode 100644 .github/workflows/check-http-documentation.yml create mode 100644 docs/website/references/http/openapi.json diff --git a/.github/workflows/check-cli-documentation.yml b/.github/workflows/check-cli-documentation.yml index 06a7bd7bdc..be50351518 100644 --- a/.github/workflows/check-cli-documentation.yml +++ b/.github/workflows/check-cli-documentation.yml @@ -35,15 +35,6 @@ jobs: - name: Checkout code into the directory uses: actions/checkout@v3 - # This check is there as a safety to ensure we start clean (without any changes). - # If there are ever changes here, the rest of the job will output false result. 
- - name: Check no changes exist initially - uses: tj-actions/verify-changed-files@v20 - with: - fail-if-changed: true - files: | - docs/website/references/cli - - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: diff --git a/.github/workflows/check-http-documentation.yml b/.github/workflows/check-http-documentation.yml new file mode 100644 index 0000000000..e68471c162 --- /dev/null +++ b/.github/workflows/check-http-documentation.yml @@ -0,0 +1,52 @@ +# Copyright 2024 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow checks that all HTTP documentation is up to date. +# If the documentation is not up to date then this action will fail. +name: Check HTTP Documentation Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + check-http-documentation: + name: Check http documentation job + + runs-on: ubuntu-latest + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + + - name: Try generating http documentation + run: make docs:http + + - name: Check no new changes exist + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true + files: | + docs/website/references/http diff --git a/Makefile b/Makefile index bc4093ddbf..1f50ab816c 100644 --- a/Makefile +++ b/Makefile @@ -356,12 +356,17 @@ chglog: docs: @$(MAKE) docs\:cli @$(MAKE) docs\:manpages + @$(MAKE) docs\:http .PHONY: docs\:cli docs\:cli: rm -f docs/website/references/cli/*.md go run 
cmd/genclidocs/main.go -o docs/website/references/cli +.PHONY: docs\:http +docs\:http: + go run cmd/genopenapi/main.go | python -m json.tool > docs/website/references/http/openapi.json + .PHONY: docs\:manpages docs\:manpages: go run cmd/genmanpages/main.go -o build/man/ diff --git a/docs/website/references/http/openapi.json b/docs/website/references/http/openapi.json new file mode 100644 index 0000000000..a8c02e72bf --- /dev/null +++ b/docs/website/references/http/openapi.json @@ -0,0 +1,2094 @@ +{ + "components": { + "parameters": { + "txn": { + "description": "Transaction id", + "in": "header", + "name": "x-defradb-tx", + "schema": { + "format": "int64", + "type": "integer" + } + } + }, + "responses": { + "error": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/error" + } + } + }, + "description": "error" + }, + "success": { + "description": "ok" + } + }, + "schemas": { + "add_view_request": { + "properties": { + "Query": { + "type": "string" + }, + "SDL": { + "type": "string" + }, + "Transform": {} + }, + "type": "object" + }, + "backup_config": { + "properties": { + "collections": { + "items": { + "type": "string" + }, + "type": "array" + }, + "filepath": { + "type": "string" + }, + "format": { + "type": "string" + }, + "pretty": { + "type": "boolean" + } + }, + "type": "object" + }, + "ccip_request": { + "properties": { + "data": { + "type": "string" + }, + "sender": { + "type": "string" + } + }, + "type": "object" + }, + "ccip_response": { + "properties": { + "data": { + "type": "string" + } + }, + "type": "object" + }, + "collection": { + "properties": { + "Fields": { + "items": { + "properties": { + "ID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Kind": {}, + "Name": { + "type": "string" + }, + "RelationName": {} + }, + "type": "object" + }, + "type": "array" + }, + "ID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Indexes": { + "items": { + "properties": { + 
"Fields": { + "items": { + "properties": { + "Descending": { + "type": "boolean" + }, + "Name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "ID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Name": { + "type": "string" + }, + "Unique": { + "type": "boolean" + } + }, + "type": "object" + }, + "type": "array" + }, + "Name": {}, + "Policy": {}, + "RootID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "SchemaVersionID": { + "type": "string" + }, + "Sources": { + "items": {}, + "type": "array" + } + }, + "type": "object" + }, + "collection_definition": { + "properties": { + "description": { + "properties": { + "Fields": { + "items": { + "properties": { + "ID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Kind": {}, + "Name": { + "type": "string" + }, + "RelationName": {} + }, + "type": "object" + }, + "type": "array" + }, + "ID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Indexes": { + "items": { + "properties": { + "Fields": { + "items": { + "properties": { + "Descending": { + "type": "boolean" + }, + "Name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "ID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Name": { + "type": "string" + }, + "Unique": { + "type": "boolean" + } + }, + "type": "object" + }, + "type": "array" + }, + "Name": {}, + "Policy": {}, + "RootID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "SchemaVersionID": { + "type": "string" + }, + "Sources": { + "items": {}, + "type": "array" + } + }, + "type": "object" + }, + "schema": { + "properties": { + "Fields": { + "items": { + "properties": { + "Kind": {}, + "Name": { + "type": "string" + }, + "Typ": { + "maximum": 255, + "minimum": 0, + "type": "integer" + } + }, + "type": "object" + }, + "type": "array" + }, + "Name": { + "type": "string" + }, + "Root": { + "type": "string" + }, + 
"VersionID": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "collection_delete": { + "properties": { + "filter": {} + }, + "type": "object" + }, + "collection_update": { + "properties": { + "filter": {}, + "updater": { + "type": "string" + } + }, + "type": "object" + }, + "create_tx": { + "properties": { + "id": { + "maximum": 18446744073709552000, + "minimum": 0, + "type": "integer" + } + }, + "type": "object" + }, + "delete_result": { + "properties": { + "Count": { + "format": "int64", + "type": "integer" + }, + "DocIDs": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "document": { + "additionalProperties": true, + "type": "object" + }, + "error": { + "properties": { + "error": {} + }, + "type": "object" + }, + "graphql_request": { + "properties": { + "query": { + "type": "string" + } + }, + "type": "object" + }, + "graphql_response": { + "properties": { + "data": {}, + "errors": { + "items": {}, + "type": "array" + } + }, + "type": "object" + }, + "index": { + "properties": { + "Fields": { + "items": { + "properties": { + "Descending": { + "type": "boolean" + }, + "Name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "ID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Name": { + "type": "string" + }, + "Unique": { + "type": "boolean" + } + }, + "type": "object" + }, + "lens_config": { + "properties": { + "DestinationSchemaVersionID": { + "type": "string" + }, + "Lenses": { + "items": { + "properties": { + "Arguments": { + "additionalProperties": {}, + "type": "object" + }, + "Inverse": { + "type": "boolean" + }, + "Path": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "SourceSchemaVersionID": { + "type": "string" + } + }, + "type": "object" + }, + "migrate_request": { + "properties": { + "CollectionID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Data": { + "items": 
{ + "additionalProperties": {}, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "patch_schema_request": { + "properties": { + "Migration": {}, + "Patch": { + "type": "string" + }, + "SetAsDefaultVersion": { + "type": "boolean" + } + }, + "type": "object" + }, + "peer_info": { + "properties": { + "Addrs": { + "items": {}, + "type": "array" + }, + "ID": { + "type": "string" + } + }, + "type": "object" + }, + "replicator": { + "properties": { + "Info": { + "properties": { + "Addrs": { + "items": {}, + "type": "array" + }, + "ID": { + "type": "string" + } + }, + "type": "object" + }, + "Schemas": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "schema": { + "properties": { + "Fields": { + "items": { + "properties": { + "Kind": {}, + "Name": { + "type": "string" + }, + "Typ": { + "maximum": 255, + "minimum": 0, + "type": "integer" + } + }, + "type": "object" + }, + "type": "array" + }, + "Name": { + "type": "string" + }, + "Root": { + "type": "string" + }, + "VersionID": { + "type": "string" + } + }, + "type": "object" + }, + "set_migration_request": { + "properties": { + "CollectionID": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + }, + "Config": { + "properties": { + "Lenses": { + "items": { + "properties": { + "Arguments": { + "additionalProperties": {}, + "type": "object" + }, + "Inverse": { + "type": "boolean" + }, + "Path": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "update_result": { + "properties": { + "Count": { + "format": "int64", + "type": "integer" + }, + "DocIDs": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "securitySchemes": { + "bearerToken": { + "bearerFormat": "JWT", + "scheme": "bearer", + "type": "http" + } + } + }, + "externalDocs": { + "description": "Learn more about DefraDB", + "url": "https://docs.source.network" 
+ }, + "info": { + "title": "DefraDB API", + "version": "0" + }, + "openapi": "3.0.3", + "paths": { + "/acp/policy": { + "post": { + "description": "Add a policy using acp system", + "operationId": "add policy", + "requestBody": { + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "acp_policy" + ] + } + }, + "/backup/export": { + "post": { + "description": "Export a database backup to file", + "operationId": "backup_export", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/backup_config" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "backup" + ] + } + }, + "/backup/import": { + "post": { + "description": "Import a database backup from file", + "operationId": "backup_import", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/backup_config" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "backup" + ] + } + }, + "/ccip": { + "post": { + "description": "CCIP POST endpoint", + "operationId": "ccip_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ccip_request" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ccip_response" + } + } + }, + "description": "GraphQL response" + }, + "400": { + "$ref": "#/components/responses/error" + 
}, + "default": { + "description": "" + } + }, + "tags": [ + "ccip" + ] + } + }, + "/ccip/{sender}/{data}": { + "get": { + "description": "CCIP GET endpoint", + "operationId": "ccip_get", + "parameters": [ + { + "description": "Hex encoded request data", + "in": "path", + "name": "data", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Hex encoded sender address", + "in": "path", + "name": "sender", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ccip_response" + } + } + }, + "description": "GraphQL response" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "ccip" + ] + } + }, + "/collections": { + "get": { + "description": "Introspect collection(s) by name, schema id, or version id.", + "operationId": "collection_describe", + "parameters": [ + { + "description": "Collection name", + "in": "query", + "name": "name", + "schema": { + "type": "string" + } + }, + { + "description": "Collection schema root", + "in": "query", + "name": "schema_root", + "schema": { + "type": "string" + } + }, + { + "description": "Collection schema version id", + "in": "query", + "name": "version_id", + "schema": { + "type": "string" + } + }, + { + "description": "If true, inactive collections will be returned in addition to active ones", + "in": "query", + "name": "get_inactive", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/collection" + }, + { + "items": { + "$ref": "#/components/schemas/collection" + }, + "type": "array" + } + ] + } + } + }, + "description": "Collection(s) with matching name, schema id, or version id." 
+ }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + }, + "patch": { + "description": "Update collection definitions", + "operationId": "patch_collection", + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "string" + } + } + } + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + } + }, + "/collections/{name}": { + "delete": { + "description": "Delete document(s) from a collection", + "operationId": "collection_delete_with_filter", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/collection_delete" + } + } + }, + "required": true + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/delete_result" + } + } + }, + "description": "Delete results" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + }, + "get": { + "description": "Get all document IDs", + "operationId": "collection_keys", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + }, + "patch": { + "description": "Update document(s) in a collection", + "operationId": "collection_update_with_filter", + "parameters": [ + { + "description": "Collection name", + "in": 
"path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/collection_update" + } + } + }, + "required": true + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/update_result" + } + } + }, + "description": "Update results" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + }, + "post": { + "description": "Create document(s) in a collection", + "operationId": "collection_create", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/document" + }, + { + "items": { + "$ref": "#/components/schemas/document" + }, + "type": "array" + } + ] + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + } + }, + "/collections/{name}/indexes": { + "get": { + "description": "List secondary indexes", + "operationId": "index_list", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/index" + }, + "type": "array" + } + } + }, + "description": "List of indexes" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "index" + ] + }, + "post": { + "description": "Create a secondary index", + 
"operationId": "index_create", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/index" + } + } + }, + "required": true + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/index" + } + } + }, + "description": "Index description" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "index" + ] + } + }, + "/collections/{name}/indexes/{index}": { + "delete": { + "description": "Delete a secondary index", + "operationId": "index_drop", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "index", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "index" + ] + } + }, + "/collections/{name}/{docID}": { + "delete": { + "description": "Delete a document by docID", + "operationId": "collection_delete", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "docID", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + }, + "get": { + "description": "Get a document by docID", + "operationId": "collection_get", + "parameters": [ + { + "description": "Collection 
name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "docID", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/document" + } + } + }, + "description": "Document value" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + }, + "patch": { + "description": "Update a document by docID", + "operationId": "collection_update", + "parameters": [ + { + "description": "Collection name", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "docID", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "collection" + ] + } + }, + "/debug/dump": { + "get": { + "description": "Dump database", + "operationId": "debug_dump", + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "debug" + ] + } + }, + "/graphql": { + "get": { + "description": "GraphQL GET endpoint", + "operationId": "graphql_get", + "parameters": [ + { + "in": "query", + "name": "query", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/graphql_response" + } + } + }, + "description": "GraphQL response" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "graphql" + ] + }, + "post": { + "description": "GraphQL POST endpoint", + "operationId": 
"graphql_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/graphql_request" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/graphql_response" + } + } + }, + "description": "GraphQL response" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "graphql" + ] + } + }, + "/lens": { + "post": { + "description": "Add a new lens migration", + "operationId": "lens_set_migration", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/lens_config" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "lens" + ] + } + }, + "/lens/registry": { + "post": { + "description": "Add a new lens migration to registry", + "operationId": "lens_registry_set_migration", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/set_migration_request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "lens" + ] + } + }, + "/lens/registry/reload": { + "post": { + "description": "Reload lens migrations", + "operationId": "lens_registry_reload", + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "lens" + ] + } + }, + "/lens/registry/{version}/down": { + "post": { + "description": "Migrate documents from a collection", + "operationId": "lens_registry_migrate_down", + "parameters": [ + { + "in": "path", + 
"name": "version", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/migrate_request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "lens" + ] + } + }, + "/lens/registry/{version}/up": { + "post": { + "description": "Migrate documents to a collection", + "operationId": "lens_registry_migrate_up", + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/migrate_request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "lens" + ] + } + }, + "/p2p/collections": { + "delete": { + "description": "Remove peer collections", + "operationId": "peer_collection_remove", + "requestBody": { + "content": { + "application/json": { + "schema": { + "items": { + "type": "string" + }, + "type": "array" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "p2p" + ] + }, + "get": { + "description": "List peer collections", + "operationId": "peer_collection_list", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "items": { + "type": "string" + }, + "type": "array" + } + } + }, + "description": "Peer collections" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "p2p" + ] + 
}, + "post": { + "description": "Add peer collections", + "operationId": "peer_collection_add", + "requestBody": { + "content": { + "application/json": { + "schema": { + "items": { + "type": "string" + }, + "type": "array" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "p2p" + ] + } + }, + "/p2p/info": { + "get": { + "operationId": "peer_info", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/peer_info" + } + } + }, + "description": "Peer network info" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "p2p" + ] + } + }, + "/p2p/replicators": { + "delete": { + "description": "Delete peer replicators", + "operationId": "peer_replicator_delete", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/replicator" + } + } + }, + "required": true + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "p2p" + ] + }, + "get": { + "description": "List peer replicators", + "operationId": "peer_replicator_list", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/replicator" + }, + "type": "array" + } + } + }, + "description": "Replicators" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "p2p" + ] + }, + "post": { + "description": "Add peer replicators", + "operationId": "peer_replicator_set", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/replicator" + } + } + }, + "required": true 
+ }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "p2p" + ] + } + }, + "/schema": { + "get": { + "description": "Introspect schema(s) by name, schema root, or version id.", + "operationId": "schema_describe", + "parameters": [ + { + "description": "Schema name", + "in": "query", + "name": "name", + "schema": { + "type": "string" + } + }, + { + "description": "Schema root", + "in": "query", + "name": "root", + "schema": { + "type": "string" + } + }, + { + "description": "Schema version id", + "in": "query", + "name": "version_id", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/schema" + }, + { + "items": { + "$ref": "#/components/schemas/schema" + }, + "type": "array" + } + ] + } + } + }, + "description": "Schema(s) with matching name, schema id, or version id." 
+ }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "schema" + ] + }, + "patch": { + "description": "Update a schema definition", + "operationId": "patch_schema", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/patch_schema_request" + } + } + } + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "schema" + ] + }, + "post": { + "description": "Add a new schema definition", + "operationId": "add_schema", + "requestBody": { + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/collection" + }, + "type": "array" + } + } + }, + "description": "Collection(s)" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "schema" + ] + } + }, + "/schema/default": { + "post": { + "description": "Set the default schema version for a collection", + "operationId": "set_default_schema_version", + "requestBody": { + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + }, + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "schema" + ] + } + }, + "/tx": { + "post": { + "description": "Create a new transaction", + "operationId": "new_transaction", + "parameters": [ + { + "description": "Read only transaction", + "in": "query", + "name": "read_only", + "schema": { + "default": false, + "type": "boolean" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/create_tx" + } + } + 
}, + "description": "Transaction info" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "transaction" + ] + } + }, + "/tx/concurrent": { + "post": { + "description": "Create a new concurrent transaction", + "operationId": "new_concurrent_transaction", + "parameters": [ + { + "description": "Read only transaction", + "in": "query", + "name": "read_only", + "schema": { + "default": false, + "type": "boolean" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/create_tx" + } + } + }, + "description": "Transaction info" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "transaction" + ] + } + }, + "/tx/{id}": { + "delete": { + "description": "Discard a transaction", + "operationId": "transaction_discard", + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "schema": { + "format": "int64", + "type": "integer" + } + } + ], + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "transaction" + ] + }, + "post": { + "description": "Commit a transaction", + "operationId": "transaction_commit", + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "schema": { + "format": "int64", + "type": "integer" + } + } + ], + "responses": { + "200": { + "$ref": "#/components/responses/success" + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "transaction" + ] + } + }, + "/view": { + "post": { + "description": "Manage database views.", + "operationId": "view", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/add_view_request" + } + } + }, + "required": true + }, + 
"responses": { + "200": { + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/collection_definition" + }, + { + "items": { + "$ref": "#/components/schemas/collection_definition" + }, + "type": "array" + } + ] + } + } + }, + "description": "The created collection and embedded schemas for the added view." + }, + "400": { + "$ref": "#/components/responses/error" + }, + "default": { + "description": "" + } + }, + "tags": [ + "view" + ] + } + } + }, + "servers": [ + { + "description": "Local DefraDB instance", + "url": "http://localhost:9181/api/v0" + } + ], + "tags": [ + { + "description": "Add or update schema definitions", + "name": "schema" + }, + { + "description": "Add, remove, or update documents", + "name": "collection" + }, + { + "description": "Add views", + "name": "view" + }, + { + "description": "Add, update, or remove indexes", + "name": "index" + }, + { + "description": "Migrate documents to and from schema versions", + "name": "lens" + }, + { + "description": "Peer-to-peer network operations", + "name": "p2p" + }, + { + "description": "Access control policy operations", + "name": "acp" + }, + { + "description": "Database transaction operations", + "name": "transaction" + }, + { + "description": "Database backup operations", + "name": "backup" + }, + { + "description": "GraphQL query endpoints", + "name": "graphql" + }, + { + "externalDocs": { + "description": "EIP-3668", + "url": "https://eips.ethereum.org/EIPS/eip-3668" + }, + "name": "ccip" + } + ] +} From 25fc07da90988194ca3d52b41c00a8ea0da2e0eb Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 4 Jun 2024 19:49:53 -0400 Subject: [PATCH 33/78] ci(i): Add workflow to detect out of date mocks (#2679) ## Relevant issue(s) Resolves #1616 ## Description - Add github action that will fail if there are any out of date mocks - Proposed a year ago here: https://github.com/sourcenetwork/defradb/pull/1612#pullrequestreview-1516813391 ## How has this been 
tested? - using `act` tool - manually through introducing a mock change in this commit: [`4b20f8f` (#2679)](https://github.com/sourcenetwork/defradb/pull/2679/commits/4b20f8f992866b58839c2f23c4c2dc1ac3d7f5d7) and then seeing the action fail here: https://github.com/sourcenetwork/defradb/actions/runs/9361512099/job/25768647150?pr=2679 - the last commit reverts the commit that was introduce the test the mock detection works, I didn't drop the commit and used the revert to have it documented better. Specify the platform(s) on which this was tested: - WSL2 --- .github/workflows/check-mocks.yml | 50 +++++++++++++++++++++++++++++++ Makefile | 12 ++++---- tests/README.md | 2 +- 3 files changed, 57 insertions(+), 7 deletions(-) create mode 100644 .github/workflows/check-mocks.yml diff --git a/.github/workflows/check-mocks.yml b/.github/workflows/check-mocks.yml new file mode 100644 index 0000000000..80d9dbee20 --- /dev/null +++ b/.github/workflows/check-mocks.yml @@ -0,0 +1,50 @@ +# Copyright 2024 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow checks that all mocks are up to date. +# If the mocks are not up to date then this action will fail. 
+name: Check Mocks Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + check-mocks: + name: Check mocks job + + runs-on: ubuntu-latest + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + + - name: Try generating mocks + run: make mocks + + - name: Check no new changes exist + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true diff --git a/Makefile b/Makefile index 1f50ab816c..c4c02da3d7 100644 --- a/Makefile +++ b/Makefile @@ -158,8 +158,8 @@ deps\:chglog: deps\:modules: go mod download -.PHONY: deps\:mock -deps\:mock: +.PHONY: deps\:mocks +deps\:mocks: go install github.com/vektra/mockery/v2@v2.32.0 .PHONY: deps\:playground @@ -173,11 +173,11 @@ deps: $(MAKE) deps:chglog && \ $(MAKE) deps:lint && \ $(MAKE) deps:test && \ - $(MAKE) deps:mock + $(MAKE) deps:mocks -.PHONY: mock -mock: - @$(MAKE) deps:mock +.PHONY: mocks +mocks: + @$(MAKE) deps:mocks mockery --config="tools/configs/mockery.yaml" .PHONY: dev\:start diff --git a/tests/README.md b/tests/README.md index a17fbcaf08..2dfe2d17c7 100644 --- a/tests/README.md +++ b/tests/README.md @@ -23,7 +23,7 @@ These tests focus on small, isolated parts of the code to ensure each part is wo For unit tests, we sometimes use mocks. Mocks are automatically generated from Go interfaces using the mockery tool. This helps to isolate the code being tested and provide more focused and reliable tests. -To regenerate the mocks, run `make mock`. +To regenerate the mocks, run `make mocks`. The mocks are typically generated into a separate mocks directory. 
From dd0e5af48f19ee58724256563f4beb4a888bdd36 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Wed, 5 Jun 2024 17:51:41 -0400 Subject: [PATCH 34/78] feat: Allow lens runtime selection via config (#2684) ## Relevant issue(s) Resolves #2683 ## Description Allows lens runtime selection via config/cli param. Also adds CI jobs to the matrix to test wasmer and wazero. --- .../workflows/test-and-upload-coverage.yml | 19 ++++ cli/server_dump.go | 2 +- cli/start.go | 1 + client/lens.go | 11 +++ docs/config.md | 9 ++ go.mod | 2 + go.sum | 4 + http/client_lens.go | 2 + http/handler_ccip_test.go | 2 +- internal/db/config.go | 17 ---- internal/db/config_test.go | 13 --- internal/db/db.go | 27 +++--- internal/db/db_test.go | 4 +- internal/lens/registry.go | 25 +----- internal/lens/txn_registry.go | 11 ++- net/node_test.go | 20 ++--- net/peer_test.go | 18 ++-- node/errors.go | 25 ++++++ node/lens.go | 90 +++++++++++++++++++ node/lens_wasmer.go | 24 +++++ node/lens_wasmtime.go | 25 ++++++ node/lens_wazero.go | 24 +++++ node/node.go | 11 ++- tests/clients/cli/wrapper_lens.go | 2 + tests/gen/cli/util_test.go | 2 +- tests/integration/db.go | 3 +- tests/integration/lens.go | 15 ++++ tools/defradb.containerfile | 5 ++ 28 files changed, 315 insertions(+), 98 deletions(-) create mode 100644 node/errors.go create mode 100644 node/lens.go create mode 100644 node/lens_wasmer.go create mode 100644 node/lens_wasmtime.go create mode 100644 node/lens_wazero.go diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index d613f32f98..f20860bb7c 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -34,6 +34,7 @@ jobs: client-type: [go, http, cli] database-type: [badger-file, badger-memory] mutation-type: [gql, collection-named, collection-save] + lens-type: [wasm-time] detect-changes: [false] database-encryption: [false] include: @@ -41,18 +42,21 @@ jobs: client-type: go 
database-type: badger-memory mutation-type: collection-save + lens-type: wasm-time detect-changes: true database-encryption: false - os: ubuntu-latest client-type: go database-type: badger-memory mutation-type: collection-save + lens-type: wasm-time detect-changes: false database-encryption: true - os: macos-latest client-type: go database-type: badger-memory mutation-type: collection-save + lens-type: wasm-time detect-changes: false database-encryption: false ## TODO: https://github.com/sourcenetwork/defradb/issues/2080 @@ -63,6 +67,20 @@ jobs: ## mutation-type: collection-save ## detect-changes: false ## database-encryption: false + - os: ubuntu-latest + client-type: go + database-type: badger-memory + mutation-type: collection-save + lens-type: wazero + detect-changes: false + database-encryption: false + - os: ubuntu-latest + client-type: go + database-type: badger-memory + mutation-type: collection-save + lens-type: wasmer + detect-changes: false + database-encryption: false runs-on: ${{ matrix.os }} @@ -80,6 +98,7 @@ jobs: DEFRA_BADGER_FILE: ${{ matrix.database-type == 'badger-file' }} DEFRA_BADGER_ENCRYPTION: ${{ matrix.database-encryption }} DEFRA_MUTATION_TYPE: ${{ matrix.mutation-type }} + DEFRA_LENS_TYPE: ${{ matrix.lens-type }} steps: - name: Checkout code into the directory diff --git a/cli/server_dump.go b/cli/server_dump.go index 1d3c68e54a..9008c81730 100644 --- a/cli/server_dump.go +++ b/cli/server_dump.go @@ -37,7 +37,7 @@ func MakeServerDumpCmd() *cobra.Command { if err != nil { return err } - db, err := db.NewDB(cmd.Context(), rootstore, acp.NoACP) + db, err := db.NewDB(cmd.Context(), rootstore, acp.NoACP, nil) if err != nil { return errors.Wrap("failed to initialize database", err) } diff --git a/cli/start.go b/cli/start.go index 9b863a1f07..9505fd7fff 100644 --- a/cli/start.go +++ b/cli/start.go @@ -126,6 +126,7 @@ func MakeStartCommand() *cobra.Command { http.WithAllowedOrigins(cfg.GetStringSlice("api.allowed-origins")...), 
http.WithTLSCertPath(cfg.GetString("api.pubKeyPath")), http.WithTLSKeyPath(cfg.GetString("api.privKeyPath")), + node.WithLensRuntime(node.LensRuntimeType(cfg.GetString("lens.runtime"))), } if cfg.GetString("datastore.store") != configStoreMemory { diff --git a/client/lens.go b/client/lens.go index 3f5befc604..997ddb4831 100644 --- a/client/lens.go +++ b/client/lens.go @@ -15,6 +15,8 @@ import ( "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/datastore" ) // LensConfig represents the configuration of a Lens migration in Defra. @@ -38,9 +40,18 @@ type LensConfig struct { model.Lens } +// TxnSource represents an object capable of constructing the transactions that +// implicit-transaction registries need internally. +type TxnSource interface { + NewTxn(context.Context, bool) (datastore.Txn, error) +} + // LensRegistry exposes several useful thread-safe migration related functions which may // be used to manage migrations. type LensRegistry interface { + // Init initializes the registry with the provided transaction source. + Init(TxnSource) + // SetMigration caches the migration for the given collection ID. It does not persist the migration in long // term storage, for that one should call [Store.SetMigration(ctx, cfg)]. // diff --git a/docs/config.md b/docs/config.md index 0981ad9a2e..ca69d6afd2 100644 --- a/docs/config.md +++ b/docs/config.md @@ -111,3 +111,12 @@ Keyring backend to use. Defaults to `file`. - `file` Stores keys in encrypted files - `system` Stores keys in the OS managed keyring + +## `lens.runtime` + +The LensVM wasm runtime to run lens modules in. 
+ +Possible values: +- `wasm-time` (default): https://github.com/bytecodealliance/wasmtime-go +- `wasmer` (windows not supported): https://github.com/wasmerio/wasmer-go +- `wazero`: https://github.com/tetratelabs/wazero diff --git a/go.mod b/go.mod index 5e47eeadf2..f9bac2daa8 100644 --- a/go.mod +++ b/go.mod @@ -282,8 +282,10 @@ require ( github.com/tendermint/go-amino v0.16.0 // indirect github.com/tendermint/tm-db v0.6.7 // indirect github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8 // indirect + github.com/tetratelabs/wazero v1.5.0 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect github.com/ugorji/go/codec v1.2.12 // indirect + github.com/wasmerio/wasmer-go v1.0.4 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect diff --git a/go.sum b/go.sum index 64f0b7b56b..3203ff8b9c 100644 --- a/go.sum +++ b/go.sum @@ -1139,6 +1139,8 @@ github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu github.com/tendermint/tm-db v0.6.7/go.mod h1:byQDzFkZV1syXr/ReXS808NxA2xvyuuVgXOJ/088L6I= github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8 h1:RBkacARv7qY5laaXGlF4wFB/tk5rnthhPb8oIBGoagY= github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8/go.mod h1:9PdLyPiZIiW3UopXyRnPYyjUXSpiQNHRLu8fOsR3o8M= +github.com/tetratelabs/wazero v1.5.0 h1:Yz3fZHivfDiZFUXnWMPUoiW7s8tC1sjdBtlJn08qYa0= +github.com/tetratelabs/wazero v1.5.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A= github.com/textileio/go-datastore-extensions v1.0.1 h1:qIJGqJaigQ1wD4TdwS/hf73u0HChhXvvUSJuxBEKS+c= github.com/textileio/go-datastore-extensions v1.0.1/go.mod h1:Pzj9FDRkb55910dr/FX8M7WywvnS26gBgEDez1ZBuLE= github.com/textileio/go-ds-badger3 v0.1.0 h1:q0kBuBmAcRUR3ClMSYlyw0224XeuzjjGinU53Qz1uXI= @@ -1168,6 +1170,8 @@ github.com/warpfork/go-testmark 
v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsX github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/wasmerio/wasmer-go v1.0.4 h1:MnqHoOGfiQ8MMq2RF6wyCeebKOe84G88h5yv+vmxJgs= +github.com/wasmerio/wasmer-go v1.0.4/go.mod h1:0gzVdSfg6pysA6QVp6iVRPTagC6Wq9pOE8J86WKb2Fk= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= diff --git a/http/client_lens.go b/http/client_lens.go index 34945a41d6..249eb99984 100644 --- a/http/client_lens.go +++ b/http/client_lens.go @@ -35,6 +35,8 @@ type setMigrationRequest struct { Config model.Lens } +func (w *LensRegistry) Init(txnSource client.TxnSource) {} + func (c *LensRegistry) SetMigration(ctx context.Context, collectionID uint32, config model.Lens) error { methodURL := c.http.baseURL.JoinPath("lens", "registry") diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go index ab8381565a..e17d8a882a 100644 --- a/http/handler_ccip_test.go +++ b/http/handler_ccip_test.go @@ -193,7 +193,7 @@ func TestCCIPPost_WithInvalidBody(t *testing.T) { func setupDatabase(t *testing.T) client.DB { ctx := context.Background() - cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), acp.NoACP, db.WithUpdateEvents()) + cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) _, err = cdb.AddSchema(ctx, `type User { diff --git a/internal/db/config.go b/internal/db/config.go index a655647df7..1364cab09b 100644 --- 
a/internal/db/config.go +++ b/internal/db/config.go @@ -11,7 +11,6 @@ package db import ( - "github.com/lens-vm/lens/host-go/engine/module" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/events" @@ -40,19 +39,3 @@ func WithMaxRetries(num int) Option { db.maxTxnRetries = immutable.Some(num) } } - -// WithLensPoolSize sets the maximum number of cached migrations instances to preserve per schema version. -// -// Will default to `5` if not set. -func WithLensPoolSize(size int) Option { - return func(db *db) { - db.lensPoolSize = immutable.Some(size) - } -} - -// WithLensRuntime returns an option that sets the lens registry runtime. -func WithLensRuntime(runtime module.Runtime) Option { - return func(db *db) { - db.lensRuntime = immutable.Some(runtime) - } -} diff --git a/internal/db/config_test.go b/internal/db/config_test.go index d4dbadaec6..f80e538b4f 100644 --- a/internal/db/config_test.go +++ b/internal/db/config_test.go @@ -13,7 +13,6 @@ package db import ( "testing" - "github.com/lens-vm/lens/host-go/runtimes/wasmtime" "github.com/stretchr/testify/assert" ) @@ -29,15 +28,3 @@ func TestWithMaxRetries(t *testing.T) { assert.True(t, d.maxTxnRetries.HasValue()) assert.Equal(t, 10, d.maxTxnRetries.Value()) } - -func TestWithLensPoolSize(t *testing.T) { - d := &db{} - WithLensPoolSize(10)(d) - assert.Equal(t, 10, d.lensPoolSize.Value()) -} - -func TestWithLensRuntime(t *testing.T) { - d := &db{} - WithLensRuntime(wasmtime.New())(d) - assert.NotNil(t, d.lensRuntime.Value()) -} diff --git a/internal/db/db.go b/internal/db/db.go index 4379928c82..979626034c 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -21,7 +21,6 @@ import ( ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" - "github.com/lens-vm/lens/host-go/engine/module" "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" @@ -32,7 +31,6 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" 
"github.com/sourcenetwork/defradb/internal/core" - "github.com/sourcenetwork/defradb/internal/lens" "github.com/sourcenetwork/defradb/internal/request/graphql" ) @@ -57,10 +55,6 @@ type db struct { parser core.Parser - // The maximum number of cached migrations instances to preserve per schema version. - lensPoolSize immutable.Option[int] - lensRuntime immutable.Option[module.Runtime] - lensRegistry client.LensRegistry // The maximum number of retries per transaction. @@ -81,15 +75,17 @@ func NewDB( ctx context.Context, rootstore datastore.RootStore, acp immutable.Option[acp.ACP], + lens client.LensRegistry, options ...Option, ) (client.DB, error) { - return newDB(ctx, rootstore, acp, options...) + return newDB(ctx, rootstore, acp, lens, options...) } func newDB( ctx context.Context, rootstore datastore.RootStore, acp immutable.Option[acp.ACP], + lens client.LensRegistry, options ...Option, ) (*db, error) { multistore := datastore.MultiStoreFrom(rootstore) @@ -100,11 +96,12 @@ func newDB( } db := &db{ - rootstore: rootstore, - multistore: multistore, - acp: acp, - parser: parser, - options: options, + rootstore: rootstore, + multistore: multistore, + acp: acp, + lensRegistry: lens, + parser: parser, + options: options, } // apply options @@ -112,9 +109,9 @@ func newDB( opt(db) } - // lens options may be set by `WithLens` funcs, and because they are funcs on db - // we have to mutate `db` here to set the registry. 
- db.lensRegistry = lens.NewRegistry(db, db.lensPoolSize, db.lensRuntime) + if lens != nil { + lens.Init(db) + } err = db.initialize(ctx) if err != nil { diff --git a/internal/db/db_test.go b/internal/db/db_test.go index fe60449cc2..6f5a03e809 100644 --- a/internal/db/db_test.go +++ b/internal/db/db_test.go @@ -26,7 +26,7 @@ func newMemoryDB(ctx context.Context) (*db, error) { if err != nil { return nil, err } - return newDB(ctx, rootstore, acp.NoACP) + return newDB(ctx, rootstore, acp.NoACP, nil) } func TestNewDB(t *testing.T) { @@ -38,7 +38,7 @@ func TestNewDB(t *testing.T) { return } - _, err = NewDB(ctx, rootstore, acp.NoACP) + _, err = NewDB(ctx, rootstore, acp.NoACP, nil) if err != nil { t.Error(err) } diff --git a/internal/lens/registry.go b/internal/lens/registry.go index 1d9c51ab46..c0fc87a14f 100644 --- a/internal/lens/registry.go +++ b/internal/lens/registry.go @@ -17,8 +17,6 @@ import ( "github.com/lens-vm/lens/host-go/config" "github.com/lens-vm/lens/host-go/config/model" "github.com/lens-vm/lens/host-go/engine/module" - "github.com/lens-vm/lens/host-go/runtimes/wasmtime" - "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" @@ -72,12 +70,6 @@ func newTxnCtx(txn datastore.Txn) *txnContext { } } -// TxnSource represents an object capable of constructing the transactions that -// implicit-transaction registries need internally. -type TxnSource interface { - NewTxn(context.Context, bool) (datastore.Txn, error) -} - // DefaultPoolSize is the default size of the lens pool for each schema version. const DefaultPoolSize int = 5 @@ -85,28 +77,19 @@ const DefaultPoolSize int = 5 // // It will be of size 5 (per schema version) if a size is not provided. 
func NewRegistry( - db TxnSource, - poolSize immutable.Option[int], - runtime immutable.Option[module.Runtime], + poolSize int, + runtime module.Runtime, ) client.LensRegistry { registry := &lensRegistry{ - poolSize: DefaultPoolSize, - runtime: wasmtime.New(), + poolSize: poolSize, + runtime: runtime, modulesByPath: map[string]module.Module{}, lensPoolsByCollectionID: map[uint32]*lensPool{}, reversedPoolsByCollectionID: map[uint32]*lensPool{}, txnCtxs: map[uint64]*txnContext{}, } - if poolSize.HasValue() { - registry.poolSize = poolSize.Value() - } - if runtime.HasValue() { - registry.runtime = runtime.Value() - } - return &implicitTxnLensRegistry{ - db: db, registry: registry, } } diff --git a/internal/lens/txn_registry.go b/internal/lens/txn_registry.go index 8093dedbdd..65ad12cf2b 100644 --- a/internal/lens/txn_registry.go +++ b/internal/lens/txn_registry.go @@ -22,7 +22,7 @@ import ( type implicitTxnLensRegistry struct { registry *lensRegistry - db TxnSource + db client.TxnSource } type explicitTxnLensRegistry struct { @@ -33,13 +33,12 @@ type explicitTxnLensRegistry struct { var _ client.LensRegistry = (*implicitTxnLensRegistry)(nil) var _ client.LensRegistry = (*explicitTxnLensRegistry)(nil) -func (r *implicitTxnLensRegistry) WithTxn(txn datastore.Txn) client.LensRegistry { - return &explicitTxnLensRegistry{ - registry: r.registry, - txn: txn, - } +func (r *implicitTxnLensRegistry) Init(txnSource client.TxnSource) { + r.db = txnSource } +func (r *explicitTxnLensRegistry) Init(txnSource client.TxnSource) {} + func (r *explicitTxnLensRegistry) WithTxn(txn datastore.Txn) client.LensRegistry { return &explicitTxnLensRegistry{ registry: r.registry, diff --git a/net/node_test.go b/net/node_test.go index 5e0b30570f..55b0573474 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -36,7 +36,7 @@ func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} rootstore, err := 
badgerds.NewDatastore("", &opts) require.NoError(t, err) - database, err = db.NewDB(ctx, rootstore, acp.NoACP, db.WithUpdateEvents()) + database, err = db.NewDB(ctx, rootstore, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) return database } @@ -44,7 +44,7 @@ func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { func TestNewNode_WithEnableRelay_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -59,7 +59,7 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) db.Close() @@ -73,7 +73,7 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { func TestNewNode_NoPubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -88,7 +88,7 @@ func TestNewNode_NoPubSub_NoError(t *testing.T) { func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( @@ -106,7 +106,7 @@ func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { func TestNodeClose_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err 
:= db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -119,7 +119,7 @@ func TestNodeClose_NoError(t *testing.T) { func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -135,7 +135,7 @@ func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -162,7 +162,7 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -192,7 +192,7 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing func TestListenAddrs_WithListenAddresses_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( context.Background(), diff --git a/net/peer_test.go b/net/peer_test.go index e708ff0708..dca864a1e3 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -75,7 +75,7 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { store := memory.NewDatastore(ctx) 
acpLocal := acp.NewLocalACP() acpLocal.Init(context.Background(), "") - db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](acpLocal), db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](acpLocal), nil, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( @@ -91,7 +91,7 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { func TestNewPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) h, err := libp2p.New() @@ -114,7 +114,7 @@ func TestNewPeer_NoDB_NilDBError(t *testing.T) { func TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) _, err = db.AddSchema(ctx, `type User { @@ -164,11 +164,11 @@ func TestStartAndClose_NoError(t *testing.T) { func TestStart_WithKnownPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db1, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db1, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) store2 := memory.NewDatastore(ctx) - db2, err := db.NewDB(ctx, store2, acp.NoACP, db.WithUpdateEvents()) + db2, err := db.NewDB(ctx, store2, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -200,11 +200,11 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db1, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db1, err := db.NewDB(ctx, store, acp.NoACP, nil, 
db.WithUpdateEvents()) require.NoError(t, err) store2 := memory.NewDatastore(ctx) - db2, err := db.NewDB(ctx, store2, acp.NoACP, db.WithUpdateEvents()) + db2, err := db.NewDB(ctx, store2, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n1, err := NewNode( @@ -240,7 +240,7 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n, err := NewNode( @@ -259,7 +259,7 @@ func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) require.NoError(t, err) n, err := NewNode( diff --git a/node/errors.go b/node/errors.go new file mode 100644 index 0000000000..d19b53359b --- /dev/null +++ b/node/errors.go @@ -0,0 +1,25 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package node + +import ( + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errLensRuntimeNotSupported string = "the selected lens runtime is not supported by this build" +) + +var ErrLensRuntimeNotSupported = errors.New(errLensRuntimeNotSupported) + +func NewErrLensRuntimeNotSupported(lens LensRuntimeType) error { + return errors.New(errLensRuntimeNotSupported, errors.NewKV("Lens", lens)) +} diff --git a/node/lens.go b/node/lens.go new file mode 100644 index 0000000000..aa8bfcc8d0 --- /dev/null +++ b/node/lens.go @@ -0,0 +1,90 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package node + +import ( + "context" + + "github.com/lens-vm/lens/host-go/engine/module" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/internal/lens" +) + +type LensRuntimeType string + +const ( + // The Go-enum default LensRuntimeType. + // + // The actual runtime type that this resolves to depends on the build target. + DefaultLens LensRuntimeType = "" +) + +// runtimeConstructors is a map of [LensRuntimeType]s to lens runtimes. +// +// Is is populated by the `init` functions in the runtime-specific files - this +// allows it's population to be managed by build flags. +var runtimeConstructors = map[LensRuntimeType]func() module.Runtime{} + +// LensOptions contains Lens configuration values. +type LensOptions struct { + lensRuntime LensRuntimeType + + // The maximum number of cached migrations instances to preserve per schema version. + lensPoolSize int +} + +// DefaultACPOptions returns new options with default values. 
+func DefaultLensOptions() *LensOptions { + return &LensOptions{ + lensPoolSize: lens.DefaultPoolSize, + } +} + +type LenOpt func(*LensOptions) + +// WithLensRuntime returns an option that sets the lens registry runtime. +func WithLensRuntime(runtime LensRuntimeType) Option { + return func(o *LensOptions) { + o.lensRuntime = runtime + } +} + +// WithLensPoolSize sets the maximum number of cached migrations instances to preserve per schema version. +// +// Will default to `5` if not set. +func WithLensPoolSize(size int) Option { + return func(o *LensOptions) { + o.lensPoolSize = size + } +} + +func NewLens( + ctx context.Context, + opts ...LenOpt, +) (client.LensRegistry, error) { + options := DefaultLensOptions() + for _, opt := range opts { + opt(options) + } + + var runtime module.Runtime + if runtimeConstructor, ok := runtimeConstructors[options.lensRuntime]; ok { + runtime = runtimeConstructor() + } else { + return nil, NewErrLensRuntimeNotSupported(options.lensRuntime) + } + + return lens.NewRegistry( + options.lensPoolSize, + runtime, + ), nil +} diff --git a/node/lens_wasmer.go b/node/lens_wasmer.go new file mode 100644 index 0000000000..fd99357ab7 --- /dev/null +++ b/node/lens_wasmer.go @@ -0,0 +1,24 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +//go:build !windows && !js + +package node + +import ( + "github.com/lens-vm/lens/host-go/engine/module" + "github.com/lens-vm/lens/host-go/runtimes/wasmer" +) + +const Wasmer LensRuntimeType = "wasmer" + +func init() { + runtimeConstructors[Wasmer] = func() module.Runtime { return wasmer.New() } +} diff --git a/node/lens_wasmtime.go b/node/lens_wasmtime.go new file mode 100644 index 0000000000..9f0070f3bf --- /dev/null +++ b/node/lens_wasmtime.go @@ -0,0 +1,25 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +//go:build !js + +package node + +import ( + "github.com/lens-vm/lens/host-go/engine/module" + "github.com/lens-vm/lens/host-go/runtimes/wasmtime" +) + +const WasmTime LensRuntimeType = "wasm-time" + +func init() { + runtimeConstructors[DefaultLens] = func() module.Runtime { return wasmtime.New() } + runtimeConstructors[WasmTime] = func() module.Runtime { return wasmtime.New() } +} diff --git a/node/lens_wazero.go b/node/lens_wazero.go new file mode 100644 index 0000000000..40d3f1b056 --- /dev/null +++ b/node/lens_wazero.go @@ -0,0 +1,24 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +//go:build !js + +package node + +import ( + "github.com/lens-vm/lens/host-go/engine/module" + "github.com/lens-vm/lens/host-go/runtimes/wazero" +) + +const Wazero LensRuntimeType = "wazero" + +func init() { + runtimeConstructors[Wazero] = func() module.Runtime { return wazero.New() } +} diff --git a/node/node.go b/node/node.go index bb3163834c..215cf05fc7 100644 --- a/node/node.go +++ b/node/node.go @@ -89,6 +89,7 @@ func NewNode(ctx context.Context, opts ...Option) (*Node, error) { netOpts []net.NodeOpt storeOpts []StoreOpt serverOpts []http.ServerOpt + lensOpts []LenOpt ) options := DefaultOptions() @@ -111,6 +112,9 @@ func NewNode(ctx context.Context, opts ...Option) (*Node, error) { case net.NodeOpt: netOpts = append(netOpts, t) + + case LenOpt: + lensOpts = append(lensOpts, t) } } @@ -124,7 +128,12 @@ func NewNode(ctx context.Context, opts ...Option) (*Node, error) { return nil, err } - db, err := db.NewDB(ctx, rootstore, acp, dbOpts...) + lens, err := NewLens(ctx, lensOpts...) + if err != nil { + return nil, err + } + + db, err := db.NewDB(ctx, rootstore, acp, lens, dbOpts...) 
if err != nil { return nil, err } diff --git a/tests/clients/cli/wrapper_lens.go b/tests/clients/cli/wrapper_lens.go index a9f3e20bd1..3aac1ae392 100644 --- a/tests/clients/cli/wrapper_lens.go +++ b/tests/clients/cli/wrapper_lens.go @@ -28,6 +28,8 @@ type LensRegistry struct { cmd *cliWrapper } +func (w *LensRegistry) Init(txnSource client.TxnSource) {} + func (w *LensRegistry) SetMigration(ctx context.Context, collectionID uint32, config model.Lens) error { args := []string{"client", "schema", "migration", "set-registry"} diff --git a/tests/gen/cli/util_test.go b/tests/gen/cli/util_test.go index dbfef99524..6f5535e6c6 100644 --- a/tests/gen/cli/util_test.go +++ b/tests/gen/cli/util_test.go @@ -50,7 +50,7 @@ func start(ctx context.Context) (*defraInstance, error) { return nil, errors.Wrap("failed to open datastore", err) } - db, err := db.NewDB(ctx, rootstore, acp.NoACP) + db, err := db.NewDB(ctx, rootstore, acp.NoACP, nil) if err != nil { return nil, errors.Wrap("failed to create a database", err) } diff --git a/tests/integration/db.go b/tests/integration/db.go index db2217a04d..c473e4cdd0 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -105,13 +105,14 @@ func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, error) { func setupDatabase(s *state) (client.DB, string, error) { opts := []node.Option{ db.WithUpdateEvents(), - db.WithLensPoolSize(lensPoolSize), + node.WithLensPoolSize(lensPoolSize), // The test framework sets this up elsewhere when required so that it may be wrapped // into a [client.DB]. node.WithDisableAPI(true), // The p2p is configured in the tests by [ConfigureNode] actions, we disable it here // to keep the tests as lightweight as possible. 
node.WithDisableP2P(true), + node.WithLensRuntime(lensType), } if badgerEncryption && encryptionKey == nil { diff --git a/tests/integration/lens.go b/tests/integration/lens.go index c99cc3f5b7..61ece97d73 100644 --- a/tests/integration/lens.go +++ b/tests/integration/lens.go @@ -11,12 +11,27 @@ package tests import ( + "os" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/db" + "github.com/sourcenetwork/defradb/node" +) + +const ( + lensTypeEnvName = "DEFRA_LENS_TYPE" ) +var ( + lensType node.LensRuntimeType +) + +func init() { + lensType = node.LensRuntimeType(os.Getenv(lensTypeEnvName)) +} + // ConfigureMigration is a test action which will configure a Lens migration using the // provided configuration. type ConfigureMigration struct { diff --git a/tools/defradb.containerfile b/tools/defradb.containerfile index 53a849b6c6..7e07bf887d 100644 --- a/tools/defradb.containerfile +++ b/tools/defradb.containerfile @@ -18,11 +18,16 @@ RUN make deps:modules COPY . . COPY --from=PLAYGROUND_BUILD /repo/dist /repo/playground/dist/ ENV BUILD_TAGS=playground +# manually copy libwasmer.so to fix linking issue https://github.com/wasmerio/wasmer-go/issues/281 +RUN export WASMER_ARCH=$(go env GOHOSTARCH | sed "s/arm64/aarch64/") && \ + export WASMER_PATH=$(go env GOMODCACHE)/github.com/wasmerio/wasmer-go@v1.0.4/wasmer/packaged/lib/linux-$WASMER_ARCH/libwasmer.so && \ + cp $WASMER_PATH /lib/libwasmer.so RUN make build # Stage: RUN FROM debian:bookworm-slim COPY --from=BUILD /repo/build/defradb /defradb +COPY --from=BUILD /lib/libwasmer.so /lib/libwasmer.so # Documents which ports are normally used. # To publish the ports: `docker run -p 9181:9181` ... 
From a7004b25cbe760cf04154f74fed5635b38ba829f Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Wed, 5 Jun 2024 15:33:35 -0700 Subject: [PATCH 35/78] refactor: Replace subscription events publisher (#2686) ## Relevant issue(s) Resolves #2685 ## Description This PR replaces subscription event publishers with a simple go channel. This is a pre-requisite to a follow up events package refactor. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test` Specify the platform(s) on which this was tested: - MacOS --- cli/request.go | 4 +- client/db.go | 6 +- http/client.go | 28 ++++---- http/handler_ccip.go | 2 +- http/handler_store.go | 4 +- internal/db/request.go | 13 +--- internal/db/subscriptions.go | 127 +++++++++++++++++------------------ tests/clients/cli/wrapper.go | 19 ++---- tests/integration/utils2.go | 8 +-- 9 files changed, 95 insertions(+), 116 deletions(-) diff --git a/cli/request.go b/cli/request.go index 796a5091c0..b6ec8e05ce 100644 --- a/cli/request.go +++ b/cli/request.go @@ -77,12 +77,12 @@ To learn more about the DefraDB GraphQL Query Language, refer to https://docs.so for _, err := range result.GQL.Errors { errors = append(errors, err.Error()) } - if result.Pub == nil { + if result.Subscription == nil { cmd.Print(REQ_RESULTS_HEADER) return writeJSON(cmd, map[string]any{"data": result.GQL.Data, "errors": errors}) } cmd.Print(SUB_RESULTS_HEADER) - for item := range result.Pub.Stream() { + for item := range result.Subscription { 
writeJSON(cmd, item) //nolint:errcheck } return nil diff --git a/client/db.go b/client/db.go index 6c530dd419..6ab945a815 100644 --- a/client/db.go +++ b/client/db.go @@ -265,9 +265,9 @@ type RequestResult struct { // GQL contains the immediate results of the GQL request. GQL GQLResult - // Pub contains a pointer to an event stream which channels any subscription results - // if the request was a GQL subscription. - Pub *events.Publisher[events.Update] + // Subscription is an optional channel which returns results + // from a subscription request. + Subscription <-chan GQLResult } // CollectionFetchOptions represents a set of options used for fetching collections. diff --git a/http/client.go b/http/client.go index 49982bad2a..9792208214 100644 --- a/http/client.go +++ b/http/client.go @@ -366,7 +366,7 @@ func (c *Client) ExecRequest( return result } if res.Header.Get("Content-Type") == "text/event-stream" { - result.Pub = c.execRequestSubscription(res.Body) + result.Subscription = c.execRequestSubscription(res.Body) return result } // ignore close errors because they have @@ -389,19 +389,17 @@ func (c *Client) ExecRequest( return result } -func (c *Client) execRequestSubscription(r io.ReadCloser) *events.Publisher[events.Update] { - pubCh := events.New[events.Update](0, 0) - pub, err := events.NewPublisher[events.Update](pubCh, 0) - if err != nil { - return nil - } - +func (c *Client) execRequestSubscription(r io.ReadCloser) chan client.GQLResult { + resCh := make(chan client.GQLResult) go func() { eventReader := sse.NewReadCloser(r) - // ignore close errors because the status - // and body of the request are already - // checked and it cannot be handled properly - defer eventReader.Close() //nolint:errcheck + defer func() { + // ignore close errors because the status + // and body of the request are already + // checked and it cannot be handled properly + eventReader.Close() //nolint:errcheck + close(resCh) + }() for { evt, err := eventReader.Next() @@ -412,14 
+410,14 @@ func (c *Client) execRequestSubscription(r io.ReadCloser) *events.Publisher[even if err := json.Unmarshal(evt.Data, &response); err != nil { return } - pub.Publish(client.GQLResult{ + resCh <- client.GQLResult{ Errors: response.Errors, Data: response.Data, - }) + } } }() - return pub + return resCh } func (c *Client) PrintDump(ctx context.Context) error { diff --git a/http/handler_ccip.go b/http/handler_ccip.go index 01597377e2..5b9aeb5402 100644 --- a/http/handler_ccip.go +++ b/http/handler_ccip.go @@ -61,7 +61,7 @@ func (c *ccipHandler) ExecCCIP(rw http.ResponseWriter, req *http.Request) { } result := store.ExecRequest(req.Context(), request.Query) - if result.Pub != nil { + if result.Subscription != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{ErrStreamingNotSupported}) return } diff --git a/http/handler_store.go b/http/handler_store.go index 521aa13775..de534a8c1d 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -314,7 +314,7 @@ func (s *storeHandler) ExecRequest(rw http.ResponseWriter, req *http.Request) { result := store.ExecRequest(req.Context(), request.Query) - if result.Pub == nil { + if result.Subscription == nil { responseJSON(rw, http.StatusOK, GraphQLResponse{result.GQL.Data, result.GQL.Errors}) return } @@ -335,7 +335,7 @@ func (s *storeHandler) ExecRequest(rw http.ResponseWriter, req *http.Request) { select { case <-req.Context().Done(): return - case item, open := <-result.Pub.Stream(): + case item, open := <-result.Subscription: if !open { return } diff --git a/internal/db/request.go b/internal/db/request.go index ff60c0835f..e5ba3d5cf5 100644 --- a/internal/db/request.go +++ b/internal/db/request.go @@ -35,27 +35,20 @@ func (db *db) execRequest(ctx context.Context, request string) *client.RequestRe return res } - pub, subRequest, err := db.checkForClientSubscriptions(parsedRequest) + pub, err := db.handleSubscription(ctx, parsedRequest) if err != nil { res.GQL.Errors = []error{err} return res } if pub != 
nil { - res.Pub = pub - go db.handleSubscription(ctx, pub, subRequest) + res.Subscription = pub return res } txn := mustGetContextTxn(ctx) identity := GetContextIdentity(ctx) - planner := planner.New( - ctx, - identity, - db.acp, - db, - txn, - ) + planner := planner.New(ctx, identity, db.acp, db, txn) results, err := planner.RunRequest(ctx, parsedRequest) if err != nil { diff --git a/internal/db/subscriptions.go b/internal/db/subscriptions.go index b52504467e..a1b0147df4 100644 --- a/internal/db/subscriptions.go +++ b/internal/db/subscriptions.go @@ -19,83 +19,78 @@ import ( "github.com/sourcenetwork/defradb/internal/planner" ) -func (db *db) checkForClientSubscriptions(r *request.Request) ( - *events.Publisher[events.Update], - *request.ObjectSubscription, - error, -) { +// handleSubscription checks for a subscription within the given request and +// starts a new go routine that will return all subscription results on the returned +// channel. If a subscription does not exist on the given request nil will be returned. 
+func (db *db) handleSubscription(ctx context.Context, r *request.Request) (<-chan client.GQLResult, error) { if len(r.Subscription) == 0 || len(r.Subscription[0].Selections) == 0 { - // This is not a subscription request and we have nothing to do here - return nil, nil, nil + return nil, nil // This is not a subscription request and we have nothing to do here } - if !db.events.Updates.HasValue() { - return nil, nil, ErrSubscriptionsNotAllowed + return nil, ErrSubscriptionsNotAllowed } - - s := r.Subscription[0].Selections[0] - if subRequest, ok := s.(*request.ObjectSubscription); ok { - pub, err := events.NewPublisher(db.events.Updates.Value(), 5) - if err != nil { - return nil, nil, err - } - - return pub, subRequest, nil + selections := r.Subscription[0].Selections[0] + subRequest, ok := selections.(*request.ObjectSubscription) + if !ok { + return nil, client.NewErrUnexpectedType[request.ObjectSubscription]("SubscriptionSelection", selections) + } + // unsubscribing from this publisher will cause a race condition + // https://github.com/sourcenetwork/defradb/issues/2687 + pub, err := events.NewPublisher(db.events.Updates.Value(), 5) + if err != nil { + return nil, err } - return nil, nil, client.NewErrUnexpectedType[request.ObjectSubscription]("SubscriptionSelection", s) -} + resCh := make(chan client.GQLResult) + go func() { + defer close(resCh) -func (db *db) handleSubscription( - ctx context.Context, - pub *events.Publisher[events.Update], - r *request.ObjectSubscription, -) { - for evt := range pub.Event() { - txn, err := db.NewTxn(ctx, false) - if err != nil { - log.ErrorContext(ctx, err.Error()) - continue - } + // listen for events and send to the result channel + for { + var evt events.Update + select { + case <-ctx.Done(): + return // context cancelled + case val, ok := <-pub.Event(): + if !ok { + return // channel closed + } + evt = val + } - ctx := SetContextTxn(ctx, txn) - db.handleEvent(ctx, pub, evt, r) - txn.Discard(ctx) - } -} + txn, err := 
db.NewTxn(ctx, false) + if err != nil { + log.ErrorContext(ctx, err.Error()) + continue + } -func (db *db) handleEvent( - ctx context.Context, - pub *events.Publisher[events.Update], - evt events.Update, - r *request.ObjectSubscription, -) { - txn := mustGetContextTxn(ctx) - identity := GetContextIdentity(ctx) - p := planner.New( - ctx, - identity, - db.acp, - db, - txn, - ) + ctx := SetContextTxn(ctx, txn) + identity := GetContextIdentity(ctx) - s := r.ToSelect(evt.DocID, evt.Cid.String()) + p := planner.New(ctx, identity, db.acp, db, txn) + s := subRequest.ToSelect(evt.DocID, evt.Cid.String()) - result, err := p.RunSubscriptionRequest(ctx, s) - if err != nil { - pub.Publish(client.GQLResult{ - Errors: []error{err}, - }) - return - } + result, err := p.RunSubscriptionRequest(ctx, s) + if err == nil && len(result) == 0 { + txn.Discard(ctx) + continue // Don't send anything back to the client if the request yields an empty dataset. + } + res := client.GQLResult{ + Data: result, + } + if err != nil { + res.Errors = []error{err} + } - // Don't send anything back to the client if the request yields an empty dataset. 
- if len(result) == 0 { - return - } + select { + case <-ctx.Done(): + txn.Discard(ctx) + return // context cancelled + case resCh <- res: + txn.Discard(ctx) + } + } + }() - pub.Publish(client.GQLResult{ - Data: result, - }) + return resCh, nil } diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index 18e560b16c..25e4c177bf 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -411,7 +411,7 @@ func (w *Wrapper) ExecRequest( return result } if header == cli.SUB_RESULTS_HEADER { - result.Pub = w.execRequestSubscription(buffer) + result.Subscription = w.execRequestSubscription(buffer) return result } data, err := io.ReadAll(buffer) @@ -439,29 +439,24 @@ func (w *Wrapper) ExecRequest( return result } -func (w *Wrapper) execRequestSubscription(r io.Reader) *events.Publisher[events.Update] { - pubCh := events.New[events.Update](0, 0) - pub, err := events.NewPublisher[events.Update](pubCh, 0) - if err != nil { - return nil - } - +func (w *Wrapper) execRequestSubscription(r io.Reader) chan client.GQLResult { + resCh := make(chan client.GQLResult) go func() { dec := json.NewDecoder(r) + defer close(resCh) for { var response http.GraphQLResponse if err := dec.Decode(&response); err != nil { return } - pub.Publish(client.GQLResult{ + resCh <- client.GQLResult{ Errors: response.Errors, Data: response.Data, - }) + } } }() - - return pub + return resCh } func (w *Wrapper) NewTxn(ctx context.Context, readOnly bool) (datastore.Txn, error) { diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 00c47fcfc2..041b553548 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -1718,13 +1718,11 @@ func executeSubscriptionRequest( allActionsAreDone := false expectedDataRecieved := len(action.Results) == 0 - stream := result.Pub.Stream() for { select { - case s := <-stream: - sResult, _ := s.(client.GQLResult) - sData, _ := sResult.Data.([]map[string]any) - errs = append(errs, sResult.Errors...) 
+ case s := <-result.Subscription: + sData, _ := s.Data.([]map[string]any) + errs = append(errs, s.Errors...) data = append(data, sData...) if len(data) >= len(action.Results) { From 0c134e54816a28121728f66655dee7733f6a63b3 Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Fri, 7 Jun 2024 14:32:12 -0400 Subject: [PATCH 36/78] refactor: DAG sync and move merge outside of net package (#2658) ## Relevant issue(s) Resolves #2624 ## Description This PR simplifies the DAG sync process withing the `net` package and moves the merge functionality to the `db` package. The merge is now initiated via an event channel. Note: I did a search and replace for `SchemaVersionId` to `SchemaVersionID`. It's in its own commit. I've also remove the `tests/integration/net/order` tests as they are now annoying to maintain an will become even more irrelevant when we refactor the WaitForSync functionality of our test framework. Another note: I've reduced the severity of the race condition on my Mac. We had a lot of leaking go routines and what is left of them is WaitForSync methods that sometimes seem to leak and also badger cache and libp2p transport that seem to leak go routines on close but I'm not sure how to handle these last two. 
--- cli/start.go | 1 + client/collection.go | 15 - client/mocks/collection.go | 130 ------ datastore/memory/memory.go | 12 +- events/dag_sync.go | 33 ++ events/events.go | 3 + go.sum | 2 - http/client_collection.go | 12 - internal/core/crdt/composite.go | 4 +- internal/core/crdt/counter.go | 2 +- internal/core/crdt/lwwreg.go | 2 +- internal/core/key.go | 10 +- internal/db/collection_index.go | 46 +- internal/db/config.go | 11 +- internal/db/config_test.go | 8 +- internal/db/db.go | 11 + internal/db/db_test.go | 6 + internal/db/errors.go | 2 + internal/db/merge.go | 425 ++++++++++++++++++ internal/db/merge_test.go | 294 ++++++++++++ internal/merkle/clock/clock.go | 2 +- net/client_test.go | 9 +- net/dag.go | 161 ------- net/dag_test.go | 216 --------- net/errors.go | 20 +- net/node.go | 105 ++++- net/peer.go | 51 +-- net/process.go | 227 +++------- net/server.go | 180 ++------ net/server_test.go | 19 +- tests/clients/cli/wrapper_collection.go | 12 - tests/integration/db.go | 1 + .../peer/with_create_update_test.go | 0 .../replicator/with_create_test.go | 0 tests/integration/net/order/tcp_test.go | 170 ------- tests/integration/net/order/utils.go | 382 ---------------- .../simple/peer/crdt/pcounter_test.go | 0 .../simple/peer/crdt/pncounter_test.go | 0 .../subscribe/with_add_get_remove_test.go | 0 .../peer/subscribe/with_add_get_test.go | 0 .../peer/subscribe/with_add_remove_test.go | 0 .../simple/peer/subscribe/with_add_test.go | 0 .../simple/peer/subscribe/with_get_test.go | 0 .../simple/peer/with_create_add_field_test.go | 0 .../simple/peer/with_create_test.go | 0 .../simple/peer/with_delete_test.go | 0 .../simple/peer/with_update_add_field_test.go | 0 .../simple/peer/with_update_restart_test.go | 0 .../simple/peer/with_update_test.go | 0 .../peer_replicator/crdt/pcounter_test.go | 0 .../peer_replicator/crdt/pncounter_test.go | 0 .../peer_replicator/with_create_test.go | 0 .../peer_replicator/with_delete_test.go | 0 .../with_update_restart_test.go | 0 
.../peer_replicator/with_update_test.go | 0 .../simple/replicator/crdt/pcounter_test.go | 0 .../simple/replicator/crdt/pncounter_test.go | 0 .../replicator/with_create_add_field_test.go | 0 .../replicator/with_create_restart_test.go | 0 .../simple/replicator/with_create_test.go | 0 .../replicator/with_create_update_test.go | 0 .../simple/replicator/with_delete_test.go | 0 .../replicator/with_update_add_field_test.go | 0 .../simple/replicator/with_update_test.go | 0 .../integration/query/commits/simple_test.go | 2 +- .../query/commits/with_field_test.go | 2 +- .../query/latest_commits/with_doc_id_test.go | 2 +- .../query/simple/with_version_test.go | 2 +- .../updates/add/field/create_update_test.go | 18 +- 69 files changed, 1035 insertions(+), 1575 deletions(-) create mode 100644 events/dag_sync.go create mode 100644 internal/db/merge.go create mode 100644 internal/db/merge_test.go delete mode 100644 net/dag.go delete mode 100644 net/dag_test.go rename tests/integration/net/{state => }/one_to_many/peer/with_create_update_test.go (100%) rename tests/integration/net/{state => }/one_to_many/replicator/with_create_test.go (100%) delete mode 100644 tests/integration/net/order/tcp_test.go delete mode 100644 tests/integration/net/order/utils.go rename tests/integration/net/{state => }/simple/peer/crdt/pcounter_test.go (100%) rename tests/integration/net/{state => }/simple/peer/crdt/pncounter_test.go (100%) rename tests/integration/net/{state => }/simple/peer/subscribe/with_add_get_remove_test.go (100%) rename tests/integration/net/{state => }/simple/peer/subscribe/with_add_get_test.go (100%) rename tests/integration/net/{state => }/simple/peer/subscribe/with_add_remove_test.go (100%) rename tests/integration/net/{state => }/simple/peer/subscribe/with_add_test.go (100%) rename tests/integration/net/{state => }/simple/peer/subscribe/with_get_test.go (100%) rename tests/integration/net/{state => }/simple/peer/with_create_add_field_test.go (100%) rename 
tests/integration/net/{state => }/simple/peer/with_create_test.go (100%) rename tests/integration/net/{state => }/simple/peer/with_delete_test.go (100%) rename tests/integration/net/{state => }/simple/peer/with_update_add_field_test.go (100%) rename tests/integration/net/{state => }/simple/peer/with_update_restart_test.go (100%) rename tests/integration/net/{state => }/simple/peer/with_update_test.go (100%) rename tests/integration/net/{state => }/simple/peer_replicator/crdt/pcounter_test.go (100%) rename tests/integration/net/{state => }/simple/peer_replicator/crdt/pncounter_test.go (100%) rename tests/integration/net/{state => }/simple/peer_replicator/with_create_test.go (100%) rename tests/integration/net/{state => }/simple/peer_replicator/with_delete_test.go (100%) rename tests/integration/net/{state => }/simple/peer_replicator/with_update_restart_test.go (100%) rename tests/integration/net/{state => }/simple/peer_replicator/with_update_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/crdt/pcounter_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/crdt/pncounter_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/with_create_add_field_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/with_create_restart_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/with_create_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/with_create_update_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/with_delete_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/with_update_add_field_test.go (100%) rename tests/integration/net/{state => }/simple/replicator/with_update_test.go (100%) diff --git a/cli/start.go b/cli/start.go index 9505fd7fff..4ae60b2bb0 100644 --- a/cli/start.go +++ b/cli/start.go @@ -116,6 +116,7 @@ func MakeStartCommand() *cobra.Command { node.WithPeers(peers...), // db options 
db.WithUpdateEvents(), + db.WithDAGMergeEvents(), db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")), // net node options net.WithListenAddresses(cfg.GetStringSlice("net.p2pAddresses")...), diff --git a/client/collection.go b/client/collection.go index 38c309a0e8..b557e2e335 100644 --- a/client/collection.go +++ b/client/collection.go @@ -122,21 +122,6 @@ type Collection interface { // GetIndexes returns all the indexes that exist on the collection. GetIndexes(ctx context.Context) ([]IndexDescription, error) - - // CreateDocIndex creates an index for the given document. - // WARNING: This method is only for internal use and is not supposed to be called by the client - // as it might compromise the integrity of the database. This method will be removed in the future - CreateDocIndex(context.Context, *Document) error - - // UpdateDocIndex updates the index for the given document. - // WARNING: This method is only for internal use and is not supposed to be called by the client - // as it might compromise the integrity of the database. This method will be removed in the future - UpdateDocIndex(ctx context.Context, oldDoc, newDoc *Document) error - - // DeleteDocIndex deletes the index for the given document. - // WARNING: This method is only for internal use and is not supposed to be called by the client - // as it might compromise the integrity of the database. This method will be removed in the future - DeleteDocIndex(context.Context, *Document) error } // DocIDResult wraps the result of an attempt at a DocID retrieval operation. 
diff --git a/client/mocks/collection.go b/client/mocks/collection.go index 7c227edd2b..9e1cf9b654 100644 --- a/client/mocks/collection.go +++ b/client/mocks/collection.go @@ -68,49 +68,6 @@ func (_c *Collection_Create_Call) RunAndReturn(run func(context.Context, *client return _c } -// CreateDocIndex provides a mock function with given fields: _a0, _a1 -func (_m *Collection) CreateDocIndex(_a0 context.Context, _a1 *client.Document) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Collection_CreateDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateDocIndex' -type Collection_CreateDocIndex_Call struct { - *mock.Call -} - -// CreateDocIndex is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *client.Document -func (_e *Collection_Expecter) CreateDocIndex(_a0 interface{}, _a1 interface{}) *Collection_CreateDocIndex_Call { - return &Collection_CreateDocIndex_Call{Call: _e.mock.On("CreateDocIndex", _a0, _a1)} -} - -func (_c *Collection_CreateDocIndex_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_CreateDocIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*client.Document)) - }) - return _c -} - -func (_c *Collection_CreateDocIndex_Call) Return(_a0 error) *Collection_CreateDocIndex_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Collection_CreateDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_CreateDocIndex_Call { - _c.Call.Return(run) - return _c -} - // CreateIndex provides a mock function with given fields: _a0, _a1 func (_m *Collection) CreateIndex(_a0 context.Context, _a1 client.IndexDescription) (client.IndexDescription, error) { ret := _m.Called(_a0, _a1) @@ -301,49 +258,6 @@ func (_c 
*Collection_Delete_Call) RunAndReturn(run func(context.Context, client. return _c } -// DeleteDocIndex provides a mock function with given fields: _a0, _a1 -func (_m *Collection) DeleteDocIndex(_a0 context.Context, _a1 *client.Document) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Collection_DeleteDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteDocIndex' -type Collection_DeleteDocIndex_Call struct { - *mock.Call -} - -// DeleteDocIndex is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 *client.Document -func (_e *Collection_Expecter) DeleteDocIndex(_a0 interface{}, _a1 interface{}) *Collection_DeleteDocIndex_Call { - return &Collection_DeleteDocIndex_Call{Call: _e.mock.On("DeleteDocIndex", _a0, _a1)} -} - -func (_c *Collection_DeleteDocIndex_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_DeleteDocIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*client.Document)) - }) - return _c -} - -func (_c *Collection_DeleteDocIndex_Call) Return(_a0 error) *Collection_DeleteDocIndex_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Collection_DeleteDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_DeleteDocIndex_Call { - _c.Call.Return(run) - return _c -} - // DeleteWithFilter provides a mock function with given fields: ctx, filter func (_m *Collection) DeleteWithFilter(ctx context.Context, filter interface{}) (*client.DeleteResult, error) { ret := _m.Called(ctx, filter) @@ -950,50 +864,6 @@ func (_c *Collection_Update_Call) RunAndReturn(run func(context.Context, *client return _c } -// UpdateDocIndex provides a mock function with given fields: ctx, oldDoc, newDoc -func (_m *Collection) UpdateDocIndex(ctx 
context.Context, oldDoc *client.Document, newDoc *client.Document) error { - ret := _m.Called(ctx, oldDoc, newDoc) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *client.Document, *client.Document) error); ok { - r0 = rf(ctx, oldDoc, newDoc) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Collection_UpdateDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateDocIndex' -type Collection_UpdateDocIndex_Call struct { - *mock.Call -} - -// UpdateDocIndex is a helper method to define mock.On call -// - ctx context.Context -// - oldDoc *client.Document -// - newDoc *client.Document -func (_e *Collection_Expecter) UpdateDocIndex(ctx interface{}, oldDoc interface{}, newDoc interface{}) *Collection_UpdateDocIndex_Call { - return &Collection_UpdateDocIndex_Call{Call: _e.mock.On("UpdateDocIndex", ctx, oldDoc, newDoc)} -} - -func (_c *Collection_UpdateDocIndex_Call) Run(run func(ctx context.Context, oldDoc *client.Document, newDoc *client.Document)) *Collection_UpdateDocIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*client.Document), args[2].(*client.Document)) - }) - return _c -} - -func (_c *Collection_UpdateDocIndex_Call) Return(_a0 error) *Collection_UpdateDocIndex_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Collection_UpdateDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document, *client.Document) error) *Collection_UpdateDocIndex_Call { - _c.Call.Return(run) - return _c -} - // UpdateWithFilter provides a mock function with given fields: ctx, filter, updater func (_m *Collection) UpdateWithFilter(ctx context.Context, filter interface{}, updater string) (*client.UpdateResult, error) { ret := _m.Called(ctx, filter, updater) diff --git a/datastore/memory/memory.go b/datastore/memory/memory.go index e650776623..8a17d79603 100644 --- a/datastore/memory/memory.go +++ b/datastore/memory/memory.go @@ -347,10 +347,14 @@ 
func (d *Datastore) executePurge(ctx context.Context) { } func (d *Datastore) handleContextDone(ctx context.Context) { - <-ctx.Done() - // It is safe to ignore the error since the only error that could occur is if the - // datastore is already closed, in which case the purpose of the `Close` call is already covered. - _ = d.Close() + select { + case <-d.closing: + return + case <-ctx.Done(): + // It is safe to ignore the error since the only error that could occur is if the + // datastore is already closed, in which case the purpose of the `Close` call is already covered. + _ = d.Close() + } } // commit commits the given transaction to the datastore. diff --git a/events/dag_sync.go b/events/dag_sync.go new file mode 100644 index 0000000000..4ab568b7d0 --- /dev/null +++ b/events/dag_sync.go @@ -0,0 +1,33 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package events + +import ( + "sync" + + "github.com/ipfs/go-cid" + + "github.com/sourcenetwork/immutable" +) + +// DAGMergeChannel is the bus onto which dag merges are published. +type DAGMergeChannel = immutable.Option[Channel[DAGMerge]] + +// DAGMerge is a notification that a merge can be performed up to the provided CID. +type DAGMerge struct { + // Cid is the id of the composite commit that formed this update in the DAG. + Cid cid.Cid + // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. + SchemaRoot string + // Wg is a wait group that can be used to synchronize the merge, + // allowing the caller to optionally block until the merge is complete. 
+ Wg *sync.WaitGroup +} diff --git a/events/events.go b/events/events.go index 4f910ab454..ec29adc24c 100644 --- a/events/events.go +++ b/events/events.go @@ -49,4 +49,7 @@ func New[T any](commandBufferSize int, eventBufferSize int) Channel[T] { type Events struct { // Updates publishes an `Update` for each document written to in the database. Updates UpdateChannel + + // DAGMerges publishes a `DAGMerge` for each completed DAG sync process over P2P. + DAGMerges DAGMergeChannel } diff --git a/go.sum b/go.sum index 3203ff8b9c..e6246a269d 100644 --- a/go.sum +++ b/go.sum @@ -1166,8 +1166,6 @@ github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vito/go-sse v1.0.0 h1:e6/iTrrvy8BRrOwJwmQmlndlil+TLdxXvHi55ZDzH6M= github.com/vito/go-sse v1.0.0/go.mod h1:2wkcaQ+jtlZ94Uve8gYZjFpL68luAjssTINA2hpgcZs= -github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= -github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/wasmerio/wasmer-go v1.0.4 h1:MnqHoOGfiQ8MMq2RF6wyCeebKOe84G88h5yv+vmxJgs= diff --git a/http/client_collection.go b/http/client_collection.go index 59b2cf79b6..ee614c1dba 100644 --- a/http/client_collection.go +++ b/http/client_collection.go @@ -413,15 +413,3 @@ func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, } return indexes, nil } - -func (c *Collection) CreateDocIndex(context.Context, *client.Document) error { - return ErrMethodIsNotImplemented -} - -func (c *Collection) UpdateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error { - return ErrMethodIsNotImplemented -} - -func (c 
*Collection) DeleteDocIndex(context.Context, *client.Document) error { - return ErrMethodIsNotImplemented -} diff --git a/internal/core/crdt/composite.go b/internal/core/crdt/composite.go index a6b7299a60..58372cfb49 100644 --- a/internal/core/crdt/composite.go +++ b/internal/core/crdt/composite.go @@ -90,7 +90,7 @@ func (c CompositeDAG) Set(status client.DocumentStatus) *CompositeDAGDelta { return &CompositeDAGDelta{ DocID: []byte(c.key.DocID), FieldName: c.fieldName, - SchemaVersionID: c.schemaVersionKey.SchemaVersionId, + SchemaVersionID: c.schemaVersionKey.SchemaVersionID, Status: status, } } @@ -130,7 +130,7 @@ func (c CompositeDAG) Merge(ctx context.Context, delta core.Delta) error { // been migrated yet locally. schemaVersionId = dagDelta.SchemaVersionID } else { - schemaVersionId = c.schemaVersionKey.SchemaVersionId + schemaVersionId = c.schemaVersionKey.SchemaVersionID } err = c.store.Put(ctx, versionKey.ToDS(), []byte(schemaVersionId)) diff --git a/internal/core/crdt/counter.go b/internal/core/crdt/counter.go index 6c4e002223..c87c7d6da6 100644 --- a/internal/core/crdt/counter.go +++ b/internal/core/crdt/counter.go @@ -141,7 +141,7 @@ func (c Counter[T]) Increment(ctx context.Context, value T) (*CounterDelta[T], e DocID: []byte(c.key.DocID), FieldName: c.fieldName, Data: value, - SchemaVersionID: c.schemaVersionKey.SchemaVersionId, + SchemaVersionID: c.schemaVersionKey.SchemaVersionID, Nonce: nonce, }, nil } diff --git a/internal/core/crdt/lwwreg.go b/internal/core/crdt/lwwreg.go index 0df8187dae..edfff9ca05 100644 --- a/internal/core/crdt/lwwreg.go +++ b/internal/core/crdt/lwwreg.go @@ -97,7 +97,7 @@ func (reg LWWRegister) Set(value []byte) *LWWRegDelta { Data: value, DocID: []byte(reg.key.DocID), FieldName: reg.fieldName, - SchemaVersionID: reg.schemaVersionKey.SchemaVersionId, + SchemaVersionID: reg.schemaVersionKey.SchemaVersionID, } } diff --git a/internal/core/key.go b/internal/core/key.go index 69b19efb6e..d087c43af8 100644 --- 
a/internal/core/key.go +++ b/internal/core/key.go @@ -136,7 +136,7 @@ var _ Key = (*CollectionNameKey)(nil) // // This key should be removed in https://github.com/sourcenetwork/defradb/issues/1085 type CollectionSchemaVersionKey struct { - SchemaVersionId string + SchemaVersionID string CollectionID uint32 } @@ -296,7 +296,7 @@ func NewCollectionNameKey(name string) CollectionNameKey { func NewCollectionSchemaVersionKey(schemaVersionId string, collectionID uint32) CollectionSchemaVersionKey { return CollectionSchemaVersionKey{ - SchemaVersionId: schemaVersionId, + SchemaVersionID: schemaVersionId, CollectionID: collectionID, } } @@ -309,7 +309,7 @@ func NewCollectionSchemaVersionKeyFromString(key string) (CollectionSchemaVersio } return CollectionSchemaVersionKey{ - SchemaVersionId: elements[len(elements)-2], + SchemaVersionID: elements[len(elements)-2], CollectionID: uint32(colID), }, nil } @@ -591,8 +591,8 @@ func (k CollectionNameKey) ToDS() ds.Key { func (k CollectionSchemaVersionKey) ToString() string { result := COLLECTION_SCHEMA_VERSION - if k.SchemaVersionId != "" { - result = result + "/" + k.SchemaVersionId + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID } if k.CollectionID != 0 { diff --git a/internal/db/collection_index.go b/internal/db/collection_index.go index c2f02bf3bf..c606cc45b7 100644 --- a/internal/db/collection_index.go +++ b/internal/db/collection_index.go @@ -109,53 +109,13 @@ func (db *db) fetchCollectionIndexDescriptions( return indexDescriptions, nil } -func (c *collection) CreateDocIndex(ctx context.Context, doc *client.Document) error { - ctx, txn, err := ensureContextTxn(ctx, c.db, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - err = c.indexNewDoc(ctx, doc) - if err != nil { - return err - } - - return txn.Commit(ctx) -} - -func (c *collection) UpdateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error { - ctx, txn, err := ensureContextTxn(ctx, c.db, false) - if err != 
nil { - return err - } - defer txn.Discard(ctx) - - err = c.deleteIndexedDoc(ctx, oldDoc) - if err != nil { - return err - } - err = c.indexNewDoc(ctx, newDoc) - if err != nil { - return err - } - - return txn.Commit(ctx) -} - -func (c *collection) DeleteDocIndex(ctx context.Context, doc *client.Document) error { - ctx, txn, err := ensureContextTxn(ctx, c.db, false) +func (c *collection) updateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error { + err := c.deleteIndexedDoc(ctx, oldDoc) if err != nil { return err } - defer txn.Discard(ctx) - err = c.deleteIndexedDoc(ctx, doc) - if err != nil { - return err - } - - return txn.Commit(ctx) + return c.indexNewDoc(ctx, newDoc) } func (c *collection) indexNewDoc(ctx context.Context, doc *client.Document) error { diff --git a/internal/db/config.go b/internal/db/config.go index 1364cab09b..2debf41df9 100644 --- a/internal/db/config.go +++ b/internal/db/config.go @@ -27,9 +27,14 @@ type Option func(*db) // WithUpdateEvents enables the update events channel. func WithUpdateEvents() Option { return func(db *db) { - db.events = events.Events{ - Updates: immutable.Some(events.New[events.Update](0, updateEventBufferSize)), - } + db.events.Updates = immutable.Some(events.New[events.Update](0, updateEventBufferSize)) + } +} + +// WithDAGMergeEvents enables the dag merge events channel. 
+func WithDAGMergeEvents() Option { + return func(db *db) { + db.events.DAGMerges = immutable.Some(events.New[events.DAGMerge](0, updateEventBufferSize)) } } diff --git a/internal/db/config_test.go b/internal/db/config_test.go index f80e538b4f..a08bd7815d 100644 --- a/internal/db/config_test.go +++ b/internal/db/config_test.go @@ -19,7 +19,13 @@ import ( func TestWithUpdateEvents(t *testing.T) { d := &db{} WithUpdateEvents()(d) - assert.NotNil(t, d.events) + assert.NotNil(t, d.events.Updates) +} + +func TestWithDAGMergeEvents(t *testing.T) { + d := &db{} + WithDAGMergeEvents()(d) + assert.NotNil(t, d.events.DAGMerges) } func TestWithMaxRetries(t *testing.T) { diff --git a/internal/db/db.go b/internal/db/db.go index 979626034c..a04dee5123 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -118,6 +118,14 @@ func newDB( return nil, err } + if db.events.DAGMerges.HasValue() { + merges, err := db.events.DAGMerges.Value().Subscribe() + if err != nil { + return nil, err + } + go db.handleMerges(ctx, merges) + } + return db, nil } @@ -262,6 +270,9 @@ func (db *db) Close() { if db.events.Updates.HasValue() { db.events.Updates.Value().Close() } + if db.events.DAGMerges.HasValue() { + db.events.DAGMerges.Value().Close() + } err := db.rootstore.Close() if err != nil { diff --git a/internal/db/db_test.go b/internal/db/db_test.go index 6f5a03e809..387be0154d 100644 --- a/internal/db/db_test.go +++ b/internal/db/db_test.go @@ -18,6 +18,7 @@ import ( "github.com/sourcenetwork/defradb/acp" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" + "github.com/sourcenetwork/defradb/datastore/memory" ) func newMemoryDB(ctx context.Context) (*db, error) { @@ -29,6 +30,11 @@ func newMemoryDB(ctx context.Context) (*db, error) { return newDB(ctx, rootstore, acp.NoACP, nil) } +func newDefraMemoryDB(ctx context.Context) (*db, error) { + rootstore := memory.NewDatastore(ctx) + return newDB(ctx, rootstore, acp.NoACP, nil) +} + func TestNewDB(t *testing.T) { ctx := 
context.Background() opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} diff --git a/internal/db/errors.go b/internal/db/errors.go index fcb4baf13f..8d3c770bd8 100644 --- a/internal/db/errors.go +++ b/internal/db/errors.go @@ -93,6 +93,7 @@ const ( errCanNotHavePolicyWithoutACP string = "can not specify policy on collection, without acp" errSecondaryFieldOnSchema string = "secondary relation fields cannot be defined on the schema" errRelationMissingField string = "relation missing field" + errNoTransactionInContext string = "no transaction in context" ) var ( @@ -126,6 +127,7 @@ var ( ErrSecondaryFieldOnSchema = errors.New(errSecondaryFieldOnSchema) ErrRelationMissingField = errors.New(errRelationMissingField) ErrMultipleRelationPrimaries = errors.New("relation can only have a single field set as primary") + ErrNoTransactionInContext = errors.New(errNoTransactionInContext) ) // NewErrFailedToGetHeads returns a new error indicating that the heads of a document diff --git a/internal/db/merge.go b/internal/db/merge.go new file mode 100644 index 0000000000..323f7ae92c --- /dev/null +++ b/internal/db/merge.go @@ -0,0 +1,425 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package db + +import ( + "container/list" + "context" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + + "github.com/sourcenetwork/corelog" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/datastore/badger/v4" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" + "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/merkle/clock" + merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" +) + +func (db *db) handleMerges(ctx context.Context, merges events.Subscription[events.DAGMerge]) { + for { + select { + case <-ctx.Done(): + return + case merge, ok := <-merges: + if !ok { + return + } + go func() { + err := db.executeMerge(ctx, merge) + if err != nil { + log.ErrorContextE( + ctx, + "Failed to execute merge", + err, + corelog.String("CID", merge.Cid.String()), + corelog.String("Error", err.Error()), + ) + } + }() + } + } +} + +func (db *db) executeMerge(ctx context.Context, dagMerge events.DAGMerge) error { + defer func() { + // Notify the caller that the merge is complete. 
+ if dagMerge.Wg != nil { + dagMerge.Wg.Done() + } + }() + ctx, txn, err := ensureContextTxn(ctx, db, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + col, err := getCollectionFromRootSchema(ctx, db, dagMerge.SchemaRoot) + if err != nil { + return err + } + + ls := cidlink.DefaultLinkSystem() + ls.SetReadStorage(txn.DAGstore().AsIPLDStorage()) + + docID, err := getDocIDFromBlock(ctx, ls, dagMerge.Cid) + if err != nil { + return err + } + dsKey := base.MakeDataStoreKeyWithCollectionAndDocID(col.Description(), docID.String()) + + mp, err := db.newMergeProcessor(txn, ls, col, dsKey) + if err != nil { + return err + } + + mt, err := getHeadsAsMergeTarget(ctx, txn, dsKey) + if err != nil { + return err + } + + err = mp.loadComposites(ctx, dagMerge.Cid, mt) + if err != nil { + return err + } + + for retry := 0; retry < db.MaxTxnRetries(); retry++ { + err := mp.mergeComposites(ctx) + if err != nil { + return err + } + err = syncIndexedDoc(ctx, docID, col) + if err != nil { + return err + } + err = txn.Commit(ctx) + if err != nil { + if errors.Is(err, badger.ErrTxnConflict) { + txn, err = db.NewTxn(ctx, false) + if err != nil { + return err + } + ctx = SetContextTxn(ctx, txn) + mp.txn = txn + mp.lsys.SetReadStorage(txn.DAGstore().AsIPLDStorage()) + // Reset the CRDTs to avoid reusing the old transaction. 
+ mp.mCRDTs = make(map[string]merklecrdt.MerkleCRDT) + continue + } + return err + } + break + } + + return nil +} + +type mergeProcessor struct { + txn datastore.Txn + lsys linking.LinkSystem + mCRDTs map[string]merklecrdt.MerkleCRDT + col *collection + dsKey core.DataStoreKey + composites *list.List +} + +func (db *db) newMergeProcessor( + txn datastore.Txn, + lsys linking.LinkSystem, + col *collection, + dsKey core.DataStoreKey, +) (*mergeProcessor, error) { + return &mergeProcessor{ + txn: txn, + lsys: lsys, + mCRDTs: make(map[string]merklecrdt.MerkleCRDT), + col: col, + dsKey: dsKey, + composites: list.New(), + }, nil +} + +type mergeTarget struct { + heads map[cid.Cid]*coreblock.Block + headHeight uint64 +} + +func newMergeTarget() mergeTarget { + return mergeTarget{ + heads: make(map[cid.Cid]*coreblock.Block), + } +} + +// loadComposites retrieves and stores into the merge processor the composite blocks for the given +// document until it reaches a block that has already been merged or until we reach the genesis block. +func (mp *mergeProcessor) loadComposites( + ctx context.Context, + blockCid cid.Cid, + mt mergeTarget, +) error { + if _, ok := mt.heads[blockCid]; ok { + // We've already processed this block. + return nil + } + + nd, err := mp.lsys.Load(linking.LinkContext{Ctx: ctx}, cidlink.Link{Cid: blockCid}, coreblock.SchemaPrototype) + if err != nil { + return err + } + + block, err := coreblock.GetFromNode(nd) + if err != nil { + return err + } + + // In the simplest case, the new block or its children will link to the current head/heads (merge target) + // of the composite DAG. However, the new block and its children might have branched off from an older block. + // In this case, we also need to walk back the merge target's DAG until we reach a common block. 
+ if block.Delta.GetPriority() >= mt.headHeight { + mp.composites.PushFront(block) + for _, link := range block.Links { + if link.Name == core.HEAD { + err := mp.loadComposites(ctx, link.Cid, mt) + if err != nil { + return err + } + } + } + } else { + newMT := newMergeTarget() + for _, b := range mt.heads { + for _, link := range b.Links { + if link.Name == core.HEAD { + nd, err := mp.lsys.Load(linking.LinkContext{Ctx: ctx}, link.Link, coreblock.SchemaPrototype) + if err != nil { + return err + } + + childBlock, err := coreblock.GetFromNode(nd) + if err != nil { + return err + } + + newMT.heads[link.Cid] = childBlock + newMT.headHeight = childBlock.Delta.GetPriority() + } + } + } + return mp.loadComposites(ctx, blockCid, newMT) + } + return nil +} + +func (mp *mergeProcessor) mergeComposites(ctx context.Context) error { + for e := mp.composites.Front(); e != nil; e = e.Next() { + block := e.Value.(*coreblock.Block) + link, err := block.GenerateLink() + if err != nil { + return err + } + err = mp.processBlock(ctx, block, link) + if err != nil { + return err + } + } + return nil +} + +// processBlock merges the block and its children to the datastore and sets the head accordingly. +func (mp *mergeProcessor) processBlock( + ctx context.Context, + block *coreblock.Block, + blockLink cidlink.Link, +) error { + crdt, err := mp.initCRDTForType(block.Delta.GetFieldName()) + if err != nil { + return err + } + + // If the CRDT is nil, it means the field is not part + // of the schema and we can safely ignore it. 
+ if crdt == nil { + return nil + } + + err = crdt.Clock().ProcessBlock(ctx, block, blockLink) + if err != nil { + return err + } + + for _, link := range block.Links { + if link.Name == core.HEAD { + continue + } + + nd, err := mp.lsys.Load(linking.LinkContext{Ctx: ctx}, link.Link, coreblock.SchemaPrototype) + if err != nil { + return err + } + + childBlock, err := coreblock.GetFromNode(nd) + if err != nil { + return err + } + + if err := mp.processBlock(ctx, childBlock, link.Link); err != nil { + return err + } + } + + return nil +} + +func (mp *mergeProcessor) initCRDTForType( + field string, +) (merklecrdt.MerkleCRDT, error) { + mcrdt, exists := mp.mCRDTs[field] + if exists { + return mcrdt, nil + } + + schemaVersionKey := core.CollectionSchemaVersionKey{ + SchemaVersionID: mp.col.Schema().VersionID, + CollectionID: mp.col.ID(), + } + + if field == "" { + mcrdt = merklecrdt.NewMerkleCompositeDAG( + mp.txn, + schemaVersionKey, + mp.dsKey.WithFieldId(core.COMPOSITE_NAMESPACE), + "", + ) + mp.mCRDTs[field] = mcrdt + return mcrdt, nil + } + + fd, ok := mp.col.Definition().GetFieldByName(field) + if !ok { + // If the field is not part of the schema, we can safely ignore it. 
+ return nil, nil + } + + mcrdt, err := merklecrdt.InstanceWithStore( + mp.txn, + schemaVersionKey, + fd.Typ, + fd.Kind, + mp.dsKey.WithFieldId(fd.ID.String()), + field, + ) + if err != nil { + return nil, err + } + + mp.mCRDTs[field] = mcrdt + return mcrdt, nil +} + +func getDocIDFromBlock(ctx context.Context, ls linking.LinkSystem, cid cid.Cid) (client.DocID, error) { + nd, err := ls.Load(linking.LinkContext{Ctx: ctx}, cidlink.Link{Cid: cid}, coreblock.SchemaPrototype) + if err != nil { + return client.DocID{}, err + } + block, err := coreblock.GetFromNode(nd) + if err != nil { + return client.DocID{}, err + } + return client.NewDocIDFromString(string(block.Delta.GetDocID())) +} + +func getCollectionFromRootSchema(ctx context.Context, db *db, rootSchema string) (*collection, error) { + cols, err := db.getCollections( + ctx, + client.CollectionFetchOptions{ + SchemaRoot: immutable.Some(rootSchema), + }, + ) + if err != nil { + return nil, err + } + if len(cols) == 0 { + return nil, client.NewErrCollectionNotFoundForSchema(rootSchema) + } + // We currently only support one active collection per root schema + // so it is safe to return the first one. + return cols[0].(*collection), nil +} + +// getHeadsAsMergeTarget retrieves the heads of the composite DAG for the given document +// and returns them as a merge target. +func getHeadsAsMergeTarget(ctx context.Context, txn datastore.Txn, dsKey core.DataStoreKey) (mergeTarget, error) { + headset := clock.NewHeadSet( + txn.Headstore(), + dsKey.WithFieldId(core.COMPOSITE_NAMESPACE).ToHeadStoreKey(), + ) + + cids, _, err := headset.List(ctx) + if err != nil { + return mergeTarget{}, err + } + + mt := newMergeTarget() + for _, cid := range cids { + b, err := txn.DAGstore().Get(ctx, cid) + if err != nil { + return mergeTarget{}, err + } + + block, err := coreblock.GetFromBytes(b.RawData()) + if err != nil { + return mergeTarget{}, err + } + + mt.heads[cid] = block + // All heads have the same height so overwriting is ok. 
+ mt.headHeight = block.Delta.GetPriority() + } + return mt, nil +} + +func syncIndexedDoc( + ctx context.Context, + docID client.DocID, + col *collection, +) error { + // remove transaction from old context + oldCtx := SetContextTxn(ctx, nil) + + oldDoc, err := col.Get(oldCtx, docID, false) + isNewDoc := errors.Is(err, client.ErrDocumentNotFoundOrNotAuthorized) + if !isNewDoc && err != nil { + return err + } + + doc, err := col.Get(ctx, docID, false) + isDeletedDoc := errors.Is(err, client.ErrDocumentNotFoundOrNotAuthorized) + if !isDeletedDoc && err != nil { + return err + } + + if isDeletedDoc { + return col.deleteIndexedDoc(ctx, oldDoc) + } else if isNewDoc { + return col.indexNewDoc(ctx, doc) + } else { + return col.updateDocIndex(ctx, oldDoc, doc) + } +} diff --git a/internal/db/merge_test.go b/internal/db/merge_test.go new file mode 100644 index 0000000000..b8671a5171 --- /dev/null +++ b/internal/db/merge_test.go @@ -0,0 +1,294 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package db + +import ( + "context" + "testing" + + "github.com/fxamacker/cbor/v2" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/internal/core" + coreblock "github.com/sourcenetwork/defradb/internal/core/block" + "github.com/sourcenetwork/defradb/internal/core/crdt" +) + +const userSchema = ` +type User { + name: String + age: Int +} +` + +func TestMerge_SingleBranch_NoError(t *testing.T) { + ctx := context.Background() + + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + + _, err = db.AddSchema(ctx, userSchema) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + lsys := cidlink.DefaultLinkSystem() + lsys.SetWriteStorage(db.multistore.DAGstore().AsIPLDStorage()) + + initialDocState := map[string]any{ + "name": "John", + } + d, docID := newDagBuilder(col, initialDocState) + compInfo, err := d.generateCompositeUpdate(&lsys, initialDocState, compositeInfo{}) + require.NoError(t, err) + compInfo2, err := d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfo) + require.NoError(t, err) + + err = db.executeMerge(ctx, events.DAGMerge{ + Cid: compInfo2.link.Cid, + SchemaRoot: col.SchemaRoot(), + }) + require.NoError(t, err) + + // Verify the document was created with the expected values + doc, err := col.Get(ctx, docID, false) + require.NoError(t, err) + docMap, err := doc.ToMap() + require.NoError(t, err) + + expectedDocMap := map[string]any{ + "_docID": docID.String(), + "name": "Johny", + } + + require.Equal(t, expectedDocMap, docMap) +} + +func TestMerge_DualBranch_NoError(t *testing.T) { + ctx := context.Background() + + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + + _, err = db.AddSchema(ctx, userSchema) + 
require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + lsys := cidlink.DefaultLinkSystem() + lsys.SetWriteStorage(db.multistore.DAGstore().AsIPLDStorage()) + + initialDocState := map[string]any{ + "name": "John", + } + d, docID := newDagBuilder(col, initialDocState) + compInfo, err := d.generateCompositeUpdate(&lsys, initialDocState, compositeInfo{}) + require.NoError(t, err) + compInfo2, err := d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfo) + require.NoError(t, err) + + err = db.executeMerge(ctx, events.DAGMerge{ + Cid: compInfo2.link.Cid, + SchemaRoot: col.SchemaRoot(), + }) + require.NoError(t, err) + + compInfo3, err := d.generateCompositeUpdate(&lsys, map[string]any{"age": 30}, compInfo) + require.NoError(t, err) + + err = db.executeMerge(ctx, events.DAGMerge{ + Cid: compInfo3.link.Cid, + SchemaRoot: col.SchemaRoot(), + }) + require.NoError(t, err) + + // Verify the document was created with the expected values + doc, err := col.Get(ctx, docID, false) + require.NoError(t, err) + docMap, err := doc.ToMap() + require.NoError(t, err) + + expectedDocMap := map[string]any{ + "_docID": docID.String(), + "age": int64(30), + "name": "Johny", + } + + require.Equal(t, expectedDocMap, docMap) +} + +// This test is not something we can reproduce in with integration tests. +// Until we introduce partial dag syncs to integration tests, this should not be removed. 
+func TestMerge_DualBranchWithOneIncomplete_CouldNotFindCID(t *testing.T) { + ctx := context.Background() + + db, err := newDefraMemoryDB(ctx) + require.NoError(t, err) + + _, err = db.AddSchema(ctx, userSchema) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + lsys := cidlink.DefaultLinkSystem() + lsys.SetWriteStorage(db.multistore.DAGstore().AsIPLDStorage()) + + initialDocState := map[string]any{ + "name": "John", + } + d, docID := newDagBuilder(col, initialDocState) + compInfo, err := d.generateCompositeUpdate(&lsys, initialDocState, compositeInfo{}) + require.NoError(t, err) + compInfo2, err := d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfo) + require.NoError(t, err) + + err = db.executeMerge(ctx, events.DAGMerge{ + Cid: compInfo2.link.Cid, + SchemaRoot: col.SchemaRoot(), + }) + require.NoError(t, err) + + someUnknownBlock := coreblock.Block{Delta: crdt.CRDT{CompositeDAGDelta: &crdt.CompositeDAGDelta{Status: 1}}} + someUnknownLink, err := coreblock.GetLinkFromNode(someUnknownBlock.GenerateNode()) + require.NoError(t, err) + + compInfoUnkown := compositeInfo{ + link: someUnknownLink, + height: 2, + } + + compInfo3, err := d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfoUnkown) + require.NoError(t, err) + + err = db.executeMerge(ctx, events.DAGMerge{ + Cid: compInfo3.link.Cid, + SchemaRoot: col.SchemaRoot(), + }) + require.ErrorContains(t, err, "could not find bafyreichk7jctbxhrodk5au3r4c4iqm627d4fi2cii2beseu4h6caoiwla") + + // Verify the document was created with the expected values + doc, err := col.Get(ctx, docID, false) + require.NoError(t, err) + docMap, err := doc.ToMap() + require.NoError(t, err) + + expectedDocMap := map[string]any{ + "_docID": docID.String(), + "name": "Johny", + } + + require.Equal(t, expectedDocMap, docMap) +} + +type dagBuilder struct { + fieldsHeight map[string]uint64 + docID []byte + col client.Collection +} + +func 
newDagBuilder(col client.Collection, initalDocState map[string]any) (*dagBuilder, client.DocID) { + doc, err := client.NewDocFromMap( + initalDocState, + col.Definition(), + ) + if err != nil { + panic(err) + } + return &dagBuilder{ + fieldsHeight: make(map[string]uint64), + docID: []byte(doc.ID().String()), + col: col, + }, doc.ID() +} + +type compositeInfo struct { + link cidlink.Link + height uint64 +} + +func (d *dagBuilder) generateCompositeUpdate(lsys *linking.LinkSystem, fields map[string]any, from compositeInfo) (compositeInfo, error) { + links := []coreblock.DAGLink{} + newPriority := from.height + 1 + if from.link.ByteLen() != 0 { + links = append(links, coreblock.DAGLink{ + Name: core.HEAD, + Link: from.link, + }) + } + for field, val := range fields { + d.fieldsHeight[field]++ + // Generate new Block and save to lsys + fieldBlock := coreblock.Block{ + Delta: crdt.CRDT{ + LWWRegDelta: &crdt.LWWRegDelta{ + DocID: d.docID, + FieldName: field, + Priority: d.fieldsHeight[field], + SchemaVersionID: d.col.Schema().VersionID, + Data: encodeValue(val), + }, + }, + } + fieldBlockLink, err := lsys.Store(ipld.LinkContext{}, coreblock.GetLinkPrototype(), fieldBlock.GenerateNode()) + if err != nil { + return compositeInfo{}, err + } + links = append(links, coreblock.DAGLink{ + Name: field, + Link: fieldBlockLink.(cidlink.Link), + }) + } + + compositeBlock := coreblock.Block{ + Delta: crdt.CRDT{ + CompositeDAGDelta: &crdt.CompositeDAGDelta{ + DocID: d.docID, + FieldName: "", + Priority: newPriority, + SchemaVersionID: d.col.Schema().VersionID, + Status: 1, + }, + }, + Links: links, + } + + compositeBlockLink, err := lsys.Store(ipld.LinkContext{}, coreblock.GetLinkPrototype(), compositeBlock.GenerateNode()) + if err != nil { + return compositeInfo{}, err + } + + return compositeInfo{ + link: compositeBlockLink.(cidlink.Link), + height: newPriority, + }, nil +} + +func encodeValue(val any) []byte { + em, err := cbor.EncOptions{Time: cbor.TimeRFC3339}.EncMode() + if err 
!= nil { + // safe to panic here as this is a test + panic(err) + } + b, err := em.Marshal(val) + if err != nil { + // safe to panic here as this is a test + panic(err) + } + return b +} diff --git a/internal/merkle/clock/clock.go b/internal/merkle/clock/clock.go index 087ba76804..06cccb6467 100644 --- a/internal/merkle/clock/clock.go +++ b/internal/merkle/clock/clock.go @@ -125,7 +125,7 @@ func (mc *MerkleClock) ProcessBlock( // check if we have any HEAD links hasHeads := false for _, l := range block.Links { - if l.Name == "_head" { + if l.Name == core.HEAD { hasHeads = true break } diff --git a/net/client_test.go b/net/client_test.go index e074947213..6e85a516be 100644 --- a/net/client_test.go +++ b/net/client_test.go @@ -129,14 +129,17 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { err = col.Save(ctx, doc) require.NoError(t, err) - cid, err := createCID(doc) + headCID, err := getHead(ctx, n1.db, doc.ID()) + require.NoError(t, err) + + b, err := n1.db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) err = n1.server.pushLog(ctx, events.Update{ DocID: doc.ID().String(), - Cid: cid, + Cid: headCID, SchemaRoot: col.SchemaRoot(), - Block: emptyBlock(), + Block: b, }, n2.PeerInfo().ID) require.NoError(t, err) } diff --git a/net/dag.go b/net/dag.go deleted file mode 100644 index 7718db6c27..0000000000 --- a/net/dag.go +++ /dev/null @@ -1,161 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package net - -import ( - "sync" - "time" - - "github.com/ipfs/go-cid" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - - "github.com/sourcenetwork/corelog" - - coreblock "github.com/sourcenetwork/defradb/internal/core/block" -) - -var ( - DAGSyncTimeout = time.Second * 60 -) - -type dagJob struct { - session *sync.WaitGroup // A waitgroup to wait for all related jobs to conclude - bp *blockProcessor // the block processor to use - cid cid.Cid // the cid of the block to fetch from the P2P network - - // OLD FIELDS - // root cid.Cid // the root of the branch we are walking down - // rootPrio uint64 // the priority of the root delta - // delta core.Delta // the current delta -} - -// the only purpose of this worker is to be able to orderly shut-down job -// workers without races by becoming the only sender for the store.jobQueue -// channel. -func (p *Peer) sendJobWorker() { - // The DAG sync process for a document is handled over a single transaction, it is possible that a single - // document ends up using all workers. Since the transaction uses a mutex to guarantee thread safety, some - // operations in those workers may temporarily blocked which would leave a concurrent document sync process - // hanging waiting for some workers to free up. To eliviate this problem, we add new workers dedicated to a - // document and discard them once the process is completed. 
- docWorkerQueue := make(map[string]chan *dagJob) - for { - select { - case <-p.ctx.Done(): - for _, job := range docWorkerQueue { - close(job) - } - return - - case newJob := <-p.sendJobs: - jobs, ok := docWorkerQueue[newJob.bp.dsKey.DocID] - if !ok { - jobs = make(chan *dagJob, numWorkers) - for i := 0; i < numWorkers; i++ { - go p.dagWorker(jobs) - } - docWorkerQueue[newJob.bp.dsKey.DocID] = jobs - } - jobs <- newJob - - case docID := <-p.closeJob: - if jobs, ok := docWorkerQueue[docID]; ok { - close(jobs) - delete(docWorkerQueue, docID) - } - } - } -} - -// dagWorker should run in its own goroutine. Workers are launched during -// initialization in New(). -func (p *Peer) dagWorker(jobs chan *dagJob) { - for job := range jobs { - select { - case <-p.ctx.Done(): - // drain jobs from queue when we are done - job.session.Done() - continue - default: - } - - go func(j *dagJob) { - if j.bp.dagSyncer != nil && j.cid.Defined() { - // BlockOfType will return the block if it is already in the store or fetch it from the network - // if it is not. This is a blocking call and will wait for the block to be fetched. - // It uses the LinkSystem to fetch the block. Blocks retrieved from the network will - // also be stored in the blockstore in the same call. - // Blocks have to match the coreblock.SchemaPrototype to be returned. 
- nd, err := j.bp.dagSyncer.BlockOfType(p.ctx, cidlink.Link{Cid: j.cid}, coreblock.SchemaPrototype) - if err != nil { - log.ErrorContextE( - p.ctx, - "Failed to get node", - err, - corelog.Any("CID", j.cid)) - j.session.Done() - return - } - block, err := coreblock.GetFromNode(nd) - if err != nil { - log.ErrorContextE( - p.ctx, - "Failed to convert ipld node to block", - err, - corelog.Any("CID", j.cid)) - } - j.bp.handleChildBlocks( - p.ctx, - j.session, - block, - ) - } - p.queuedChildren.Remove(j.cid) - j.session.Done() - }(job) - } -} - -type cidSafeSet struct { - set map[cid.Cid]struct{} - mux sync.Mutex -} - -func newCidSafeSet() *cidSafeSet { - return &cidSafeSet{ - set: make(map[cid.Cid]struct{}), - } -} - -// Visit checks if we can visit this node, or -// if its already being visited -func (s *cidSafeSet) Visit(c cid.Cid) bool { - var b bool - s.mux.Lock() - { - if _, ok := s.set[c]; !ok { - s.set[c] = struct{}{} - b = true - } - } - s.mux.Unlock() - return b -} - -func (s *cidSafeSet) Remove(c cid.Cid) { - s.mux.Lock() - { - delete(s.set, c) - } - s.mux.Unlock() -} diff --git a/net/dag_test.go b/net/dag_test.go deleted file mode 100644 index 976f43653a..0000000000 --- a/net/dag_test.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package net - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/core" - coreblock "github.com/sourcenetwork/defradb/internal/core/block" - "github.com/sourcenetwork/defradb/internal/merkle/clock" - netutils "github.com/sourcenetwork/defradb/net/utils" -) - -const timeout = 5 * time.Second - -func TestSendJobWorker_ExitOnContextClose_NoError(t *testing.T) { - ctx := context.Background() - _, n := newTestNode(ctx, t) - done := make(chan struct{}) - go func() { - n.sendJobWorker() - close(done) - }() - n.Close() - select { - case <-done: - case <-time.After(timeout): - t.Error("failed to close sendJobWorker") - } -} - -func TestSendJobWorker_WithNewJob_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - done := make(chan struct{}) - go func() { - n.sendJobWorker() - close(done) - }() - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col.Definition()) - require.NoError(t, err) - dsKey := core.DataStoreKeyFromDocID(doc.ID()) - - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - wg := sync.WaitGroup{} - wg.Add(1) - - n.sendJobs <- &dagJob{ - session: &wg, - bp: &blockProcessor{ - dsKey: dsKey, - txn: txn, - }, - } - // Give the jobworker time to process the job. 
- time.Sleep(100 * time.Microsecond) - n.Close() - select { - case <-done: - case <-time.After(timeout): - t.Error("failed to close sendJobWorker") - } -} - -func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - done := make(chan struct{}) - go func() { - n.sendJobWorker() - close(done) - }() - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col.Definition()) - require.NoError(t, err) - dsKey := core.DataStoreKeyFromDocID(doc.ID()) - - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - wg := sync.WaitGroup{} - wg.Add(1) - - n.sendJobs <- &dagJob{ - session: &wg, - bp: &blockProcessor{ - dsKey: dsKey, - txn: txn, - }, - } - - n.closeJob <- dsKey.DocID - - n.Close() - select { - case <-done: - case <-time.After(timeout): - t.Error("failed to close sendJobWorker") - } -} - -func TestSendJobWorker_WithPeer_NoError(t *testing.T) { - ctx := context.Background() - db1, n1 := newTestNode(ctx, t) - db2, n2 := newTestNode(ctx, t) - - addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) - require.NoError(t, err) - n2.Bootstrap(addrs) - - err = n1.WaitForPeerConnectionEvent(n2.PeerID()) - require.NoError(t, err) - err = n2.WaitForPeerConnectionEvent(n1.PeerID()) - require.NoError(t, err) - done := make(chan struct{}) - go func() { - n2.sendJobWorker() - close(done) - }() - - _, err = db1.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - _, err = db2.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - col, err := db1.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col.Definition()) 
- require.NoError(t, err) - dsKey := core.DataStoreKeyFromDocID(doc.ID()) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - txn1, _ := db1.NewTxn(ctx, false) - heads, _, err := clock.NewHeadSet(txn1.Headstore(), dsKey.ToHeadStoreKey().WithFieldId(core.COMPOSITE_NAMESPACE)).List(ctx) - require.NoError(t, err) - txn1.Discard(ctx) - - txn2, err := db2.NewTxn(ctx, false) - require.NoError(t, err) - - wg := sync.WaitGroup{} - wg.Add(1) - - fetcher := n2.Peer.newDAGSyncerTxn(txn2) - - n2.sendJobs <- &dagJob{ - bp: newBlockProcessor(n2.Peer, txn2, col, dsKey, fetcher), - session: &wg, - cid: heads[0], - } - wg.Wait() - - err = txn2.Commit(ctx) - require.NoError(t, err) - - b, err := n1.db.Blockstore().Get(ctx, heads[0]) - require.NoError(t, err) - block, err := coreblock.GetFromBytes(b.RawData()) - require.NoError(t, err) - - for _, link := range block.Links { - exists, err := n2.db.Blockstore().Has(ctx, link.Cid) - require.NoError(t, err) - require.True(t, exists) - } - - n1.Close() - n2.Close() - select { - case <-done: - case <-time.After(timeout): - t.Error("failed to close sendJobWorker") - } -} diff --git a/net/errors.go b/net/errors.go index 773eb8765d..eb53a8e2a5 100644 --- a/net/errors.go +++ b/net/errors.go @@ -19,13 +19,14 @@ import ( ) const ( - errPushLog = "failed to push log" - errFailedToGetDocID = "failed to get DocID from broadcast message" - errPublishingToDocIDTopic = "can't publish log %s for docID %s" - errPublishingToSchemaTopic = "can't publish log %s for schema %s" - errReplicatorExists = "replicator already exists for %s with peerID %s" - errReplicatorDocID = "failed to get docID for replicator %s with peerID %s" - errReplicatorCollections = "failed to get collections for replicator" + errPushLog = "failed to push log" + errFailedToGetDocID = "failed to get DocID from broadcast message" + errPublishingToDocIDTopic = "can't publish log %s for docID %s" + errPublishingToSchemaTopic = "can't publish log %s for schema %s" + 
errReplicatorExists = "replicator already exists for %s with peerID %s" + errReplicatorDocID = "failed to get docID for replicator %s with peerID %s" + errReplicatorCollections = "failed to get collections for replicator" + errCheckingForExistingBlock = "failed to check for existing block" ) var ( @@ -38,6 +39,7 @@ var ( ErrNilDB = errors.New("database object can't be nil") ErrNilUpdateChannel = errors.New("tried to subscribe to update channel, but update channel is nil") ErrSelfTargetForReplicator = errors.New("can't target ourselves as a replicator") + ErrCheckingForExistingBlock = errors.New(errCheckingForExistingBlock) ) func NewErrPushLog(inner error, kv ...errors.KV) error { @@ -67,3 +69,7 @@ func NewErrReplicatorDocID(inner error, collection string, peerID peer.ID, kv .. func NewErrReplicatorCollections(inner error, kv ...errors.KV) error { return errors.Wrap(errReplicatorCollections, inner, kv...) } + +func NewErrCheckingForExistingBlock(inner error, cid string) error { + return errors.Wrap(errCheckingForExistingBlock, inner, errors.NewKV("cid", cid)) +} diff --git a/net/node.go b/net/node.go index 7683d3fb8f..1fa8da6316 100644 --- a/net/node.go +++ b/net/node.go @@ -36,8 +36,8 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" - "github.com/multiformats/go-multiaddr" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/go-libp2p-pubsub-rpc/finalizer" @@ -70,8 +70,9 @@ type Node struct { // receives an event when a pushLog request has been processed. pushLogEvent chan EvtReceivedPushLog - ctx context.Context - cancel context.CancelFunc + ctx context.Context + cancel context.CancelFunc + dhtClose func() error } // NewNode creates a new network node instance of DefraDB, wired into libp2p. 
@@ -79,7 +80,7 @@ func NewNode( ctx context.Context, db client.DB, opts ...NodeOpt, -) (*Node, error) { +) (node *Node, err error) { options := DefaultOptions() for _, opt := range opts { opt(options) @@ -101,6 +102,13 @@ func NewNode( fin := finalizer.NewFinalizer() + ctx, cancel := context.WithCancel(ctx) + defer func() { + if node == nil { + cancel() + } + }() + peerstore, err := pstoreds.NewPeerstore(ctx, db.Peerstore(), pstoreds.DefaultOpts()) if err != nil { return nil, fin.Cleanup(err) @@ -170,9 +178,6 @@ func NewNode( return nil, fin.Cleanup(err) } } - - ctx, cancel := context.WithCancel(ctx) - peer, err := NewPeer( ctx, db, @@ -183,7 +188,6 @@ func NewNode( options.GRPCDialOptions, ) if err != nil { - cancel() return nil, fin.Cleanup(err) } @@ -201,6 +205,7 @@ func NewNode( DB: db, ctx: ctx, cancel: cancel, + dhtClose: ddht.Close, } n.subscribeToPeerConnectionEvents() @@ -268,12 +273,28 @@ func (n *Node) subscribeToPeerConnectionEvents() { return } go func() { - for e := range sub.Out() { + for { select { - case n.peerEvent <- e.(event.EvtPeerConnectednessChanged): - default: - <-n.peerEvent - n.peerEvent <- e.(event.EvtPeerConnectednessChanged) + case <-n.ctx.Done(): + err := sub.Close() + if err != nil { + log.ErrorContextE( + n.ctx, + "Failed to close peer connectedness changed event subscription", + err, + ) + } + return + case e, ok := <-sub.Out(): + if !ok { + return + } + select { + case n.peerEvent <- e.(event.EvtPeerConnectednessChanged): + default: + <-n.peerEvent + n.peerEvent <- e.(event.EvtPeerConnectednessChanged) + } } } }() @@ -290,12 +311,28 @@ func (n *Node) subscribeToPubSubEvents() { return } go func() { - for e := range sub.Out() { + for { select { - case n.pubSubEvent <- e.(EvtPubSub): - default: - <-n.pubSubEvent - n.pubSubEvent <- e.(EvtPubSub) + case <-n.ctx.Done(): + err := sub.Close() + if err != nil { + log.ErrorContextE( + n.ctx, + "Failed to close pubsub event subscription", + err, + ) + } + return + case e, ok := <-sub.Out(): 
+ if !ok { + return + } + select { + case n.pubSubEvent <- e.(EvtPubSub): + default: + <-n.pubSubEvent + n.pubSubEvent <- e.(EvtPubSub) + } } } }() @@ -312,12 +349,28 @@ func (n *Node) subscribeToPushLogEvents() { return } go func() { - for e := range sub.Out() { + for { select { - case n.pushLogEvent <- e.(EvtReceivedPushLog): - default: - <-n.pushLogEvent - n.pushLogEvent <- e.(EvtReceivedPushLog) + case <-n.ctx.Done(): + err := sub.Close() + if err != nil { + log.ErrorContextE( + n.ctx, + "Failed to close push log event subscription", + err, + ) + } + return + case e, ok := <-sub.Out(): + if !ok { + return + } + select { + case n.pushLogEvent <- e.(EvtReceivedPushLog): + default: + <-n.pushLogEvent + n.pushLogEvent <- e.(EvtReceivedPushLog) + } } } }() @@ -428,5 +481,11 @@ func (n Node) Close() { if n.Peer != nil { n.Peer.Close() } + if n.dhtClose != nil { + err := n.dhtClose() + if err != nil { + log.ErrorContextE(n.ctx, "Failed to close DHT", err) + } + } n.DB.Close() } diff --git a/net/peer.go b/net/peer.go index 3d728a1d87..fc49aec7ec 100644 --- a/net/peer.go +++ b/net/peer.go @@ -21,8 +21,6 @@ import ( "github.com/ipfs/boxo/bitswap/network" "github.com/ipfs/boxo/blockservice" exchange "github.com/ipfs/boxo/exchange" - dagsyncer "github.com/ipfs/boxo/fetcher" - dagsyncerbs "github.com/ipfs/boxo/fetcher/impl/blockservice" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" gostream "github.com/libp2p/go-libp2p-gostream" @@ -44,10 +42,6 @@ import ( pb "github.com/sourcenetwork/defradb/net/pb" ) -var ( - numWorkers = 5 -) - // Peer is a DefraDB Peer node which exposes all the LibP2P host/peer functionality // to the underlying DefraDB instance. type Peer struct { @@ -63,20 +57,11 @@ type Peer struct { server *server p2pRPC *grpc.Server // rpc server over the P2P network - // Used to close the dagWorker pool for a given document. - // The string represents a docID. 
- closeJob chan string - sendJobs chan *dagJob - - // outstanding log request currently being processed - queuedChildren *cidSafeSet - // replicators is a map from collectionName => peerId replicators map[string]map[peer.ID]struct{} mu sync.Mutex // peer DAG service - dagsyncerbs.FetcherConfig exch exchange.Interface bserv blockservice.BlockService @@ -100,20 +85,17 @@ func NewPeer( ctx, cancel := context.WithCancel(ctx) p := &Peer{ - host: h, - dht: dht, - ps: ps, - db: db, - p2pRPC: grpc.NewServer(serverOptions...), - ctx: ctx, - cancel: cancel, - closeJob: make(chan string), - sendJobs: make(chan *dagJob), - replicators: make(map[string]map[peer.ID]struct{}), - queuedChildren: newCidSafeSet(), + host: h, + dht: dht, + ps: ps, + db: db, + p2pRPC: grpc.NewServer(serverOptions...), + ctx: ctx, + cancel: cancel, + replicators: make(map[string]map[peer.ID]struct{}), } var err error - p.server, err = newServer(p, db, dialOptions...) + p.server, err = newServer(p, dialOptions...) if err != nil { return nil, err } @@ -124,7 +106,6 @@ func NewPeer( } p.setupBlockService() - p.setupDAGService() return p, nil } @@ -190,9 +171,6 @@ func (p *Peer) Start() error { } }() - // start sendJobWorker - go p.sendJobWorker() - return nil } @@ -496,17 +474,6 @@ func (p *Peer) setupBlockService() { p.exch = bswap } -func (p *Peer) setupDAGService() { - p.FetcherConfig = dagsyncerbs.NewFetcherConfig(p.bserv) -} - -func (p *Peer) newDAGSyncerTxn(txn datastore.Txn) dagsyncer.Fetcher { - return p.FetcherWithSession( - p.ctx, - blockservice.NewSession(p.ctx, blockservice.New(txn.DAGstore(), p.exch)), - ) -} - func stopGRPCServer(ctx context.Context, server *grpc.Server) { stopped := make(chan struct{}) go func() { diff --git a/net/process.go b/net/process.go index 882c29c360..b4f85134fb 100644 --- a/net/process.go +++ b/net/process.go @@ -13,229 +13,112 @@ package net import ( - "container/list" "context" - "fmt" "sync" + "time" - dagsyncer "github.com/ipfs/boxo/fetcher" + 
"github.com/ipfs/boxo/blockservice" + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime/linking" cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/corelog" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" - "github.com/sourcenetwork/defradb/internal/db/base" - merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" +) + +var ( + dagSyncTimeout = time.Second * 60 ) type blockProcessor struct { *Peer - txn datastore.Txn - col client.Collection - dsKey core.DataStoreKey - dagSyncer dagsyncer.Fetcher - // List of composite blocks to eventually merge - composites *list.List + wg *sync.WaitGroup + bsSession *blockservice.Session + queuedChildren *sync.Map } func newBlockProcessor( + ctx context.Context, p *Peer, - txn datastore.Txn, - col client.Collection, - dsKey core.DataStoreKey, - dagSyncer dagsyncer.Fetcher, ) *blockProcessor { return &blockProcessor{ - Peer: p, - composites: list.New(), - txn: txn, - col: col, - dsKey: dsKey, - dagSyncer: dagSyncer, - } -} - -// mergeBlock runs trough the list of composite blocks and sends them for processing. -func (bp *blockProcessor) mergeBlocks(ctx context.Context) { - for e := bp.composites.Front(); e != nil; e = e.Next() { - block := e.Value.(*coreblock.Block) - link, _ := block.GenerateLink() - err := bp.processBlock(ctx, block, link, "") - if err != nil { - log.ErrorContextE( - ctx, - "Failed to process block", - err, - corelog.String("DocID", bp.dsKey.DocID), - corelog.Any("CID", link.Cid), - ) - } - } -} - -// processBlock merges the block and its children to the datastore and sets the head accordingly. 
-func (bp *blockProcessor) processBlock( - ctx context.Context, - block *coreblock.Block, - blockLink cidlink.Link, - field string, -) error { - crdt, err := initCRDTForType(bp.txn, bp.col, bp.dsKey, field) - if err != nil { - return err - } - - err = crdt.Clock().ProcessBlock(ctx, block, blockLink) - if err != nil { - return err - } - - for _, link := range block.Links { - if link.Name == core.HEAD { - continue - } - - b, err := bp.txn.DAGstore().Get(ctx, link.Cid) - if err != nil { - return err - } - - childBlock, err := coreblock.GetFromBytes(b.RawData()) - if err != nil { - return err - } - - if err := bp.processBlock(ctx, childBlock, link.Link, link.Name); err != nil { - log.ErrorContextE( - ctx, - "Failed to process block", - err, - corelog.String("DocID", bp.dsKey.DocID), - corelog.Any("CID", link.Cid), - ) - } - } - - return nil -} - -func initCRDTForType( - txn datastore.Txn, - col client.Collection, - dsKey core.DataStoreKey, - field string, -) (merklecrdt.MerkleCRDT, error) { - var key core.DataStoreKey - var ctype client.CType - description := col.Description() - if field == "" { // empty field name implies composite type - key = base.MakeDataStoreKeyWithCollectionDescription( - description, - ).WithInstanceInfo( - dsKey, - ).WithFieldId( - core.COMPOSITE_NAMESPACE, - ) - - return merklecrdt.NewMerkleCompositeDAG( - txn, - core.NewCollectionSchemaVersionKey(col.Schema().VersionID, col.ID()), - key, - field, - ), nil - } - - fd, ok := col.Definition().GetFieldByName(field) - if !ok { - return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, dsKey.ToString())) + Peer: p, + wg: &sync.WaitGroup{}, + bsSession: blockservice.NewSession(ctx, p.bserv), + queuedChildren: &sync.Map{}, } - ctype = fd.Typ - fieldID := fd.ID.String() - key = base.MakeDataStoreKeyWithCollectionDescription(description).WithInstanceInfo(dsKey).WithFieldId(fieldID) - - return merklecrdt.InstanceWithStore( - txn, - 
core.NewCollectionSchemaVersionKey(col.Schema().VersionID, col.ID()), - ctype, - fd.Kind, - key, - field, - ) } // processRemoteBlock stores the block in the DAG store and initiates a sync of the block's children. func (bp *blockProcessor) processRemoteBlock( ctx context.Context, - session *sync.WaitGroup, block *coreblock.Block, ) error { - link, err := block.GenerateLink() + // Store the block in the DAG store + lsys := cidlink.DefaultLinkSystem() + lsys.SetWriteStorage(bp.db.Blockstore().AsIPLDStorage()) + _, err := lsys.Store(linking.LinkContext{Ctx: ctx}, coreblock.GetLinkPrototype(), block.GenerateNode()) if err != nil { return err } - - b, err := block.Marshal() - if err != nil { - return err - } - - if err := bp.txn.DAGstore().AsIPLDStorage().Put(ctx, link.Binary(), b); err != nil { - return err - } - - bp.handleChildBlocks(ctx, session, block) + // Initiate a sync of the block's children + bp.wg.Add(1) + bp.handleChildBlocks(ctx, block) return nil } func (bp *blockProcessor) handleChildBlocks( ctx context.Context, - session *sync.WaitGroup, block *coreblock.Block, ) { - if block.Delta.IsComposite() { - bp.composites.PushFront(block) - } + defer bp.wg.Done() if len(block.Links) == 0 { return } - ctx, cancel := context.WithTimeout(ctx, DAGSyncTimeout) - defer cancel() - + links := make([]cid.Cid, 0, len(block.Links)) for _, link := range block.Links { - if !bp.queuedChildren.Visit(link.Cid) { // reserve for processing - continue - } - - exist, err := bp.txn.DAGstore().Has(ctx, link.Cid) + exists, err := bp.db.Blockstore().Has(ctx, link.Cid) if err != nil { - log.ErrorContext( + log.ErrorContextE( ctx, - "Failed to check for existing block", + "Failed to check if block exists", + err, corelog.Any("CID", link.Cid), - corelog.Any("ERROR", err), ) + continue } - if exist { + if exists { continue } - - session.Add(1) - job := &dagJob{ - session: session, - cid: link.Cid, - bp: bp, + if _, loaded := bp.queuedChildren.LoadOrStore(link.Cid, struct{}{}); !loaded { + 
links = append(links, link.Cid) } + } + + getCtx, cancel := context.WithTimeout(ctx, dagSyncTimeout) + defer cancel() + + childBlocks := bp.bsSession.GetBlocks(getCtx, links) - select { - case bp.sendJobs <- job: - case <-bp.ctx.Done(): - return // jump out + for rawBlock := range childBlocks { + block, err := coreblock.GetFromBytes(rawBlock.RawData()) + if err != nil { + log.ErrorContextE( + ctx, + "Failed to get block from bytes", + err, + corelog.Any("CID", rawBlock.Cid()), + ) + continue } + bp.wg.Add(1) + go bp.handleChildBlocks(ctx, block) + } + + for _, link := range links { + bp.queuedChildren.Delete(link) } } diff --git a/net/server.go b/net/server.go index 1cd9910856..94d791854f 100644 --- a/net/server.go +++ b/net/server.go @@ -22,18 +22,15 @@ import ( libpeer "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/corelog" rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" - "github.com/sourcenetwork/immutable" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" grpcpeer "google.golang.org/grpc/peer" "google.golang.org/protobuf/proto" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/events" coreblock "github.com/sourcenetwork/defradb/internal/core/block" - "github.com/sourcenetwork/defradb/internal/db" pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -45,7 +42,6 @@ import ( type server struct { peer *Peer opts []grpc.DialOption - db client.DB topics map[string]pubsubTopic mu sync.Mutex @@ -72,12 +68,11 @@ type pubsubTopic struct { // newServer creates a new network server that handle/directs RPC requests to the // underlying DB instance. 
-func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) { +func newServer(p *Peer, opts ...grpc.DialOption) (*server, error) { s := &server{ peer: p, conns: make(map[libpeer.ID]*grpc.ClientConn), topics: make(map[string]pubsubTopic), - db: db, docQueue: &docQueue{ docs: make(map[string]chan struct{}), }, @@ -97,7 +92,7 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) } // Get all DocIDs across all collections in the DB - cols, err := s.db.GetCollections(s.peer.ctx, client.CollectionFetchOptions{}) + cols, err := s.peer.db.GetCollections(s.peer.ctx, client.CollectionFetchOptions{}) if err != nil { return nil, err } @@ -224,157 +219,52 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL } }() - // make sure were not processing twice - if canVisit := s.peer.queuedChildren.Visit(cid); !canVisit { - return &pb.PushLogReply{}, nil - } - defer s.peer.queuedChildren.Remove(cid) - // check if we already have this block - exists, err := s.db.Blockstore().Has(ctx, cid) + exists, err := s.peer.db.Blockstore().Has(ctx, cid) if err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to check for existing block %s", cid), err) + return nil, NewErrCheckingForExistingBlock(err, cid.String()) } if exists { return &pb.PushLogReply{}, nil } - dsKey := core.DataStoreKeyFromDocID(docID) - - var txnErr error - for retry := 0; retry < s.peer.db.MaxTxnRetries(); retry++ { - // To prevent a potential deadlock on DAG sync if an error occures mid process, we handle - // each process on a single transaction. 
- txn, err := s.db.NewConcurrentTxn(ctx, false) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - - // use a transaction for all operations - ctx = db.SetContextTxn(ctx, txn) - - // Currently a schema is the best way we have to link a push log request to a collection, - // this will change with https://github.com/sourcenetwork/defradb/issues/1085 - col, err := s.getActiveCollection(ctx, s.db, string(req.Body.SchemaRoot)) - if err != nil { - return nil, err - } - - // Create a new DAG service with the current transaction - dagSyncer := s.peer.newDAGSyncerTxn(txn) - - // handleComposite - block, err := coreblock.GetFromBytes(req.Body.Log.Block) - if err != nil { - return nil, errors.Wrap("failed to decode block", err) - } + block, err := coreblock.GetFromBytes(req.Body.Log.Block) + if err != nil { + return nil, err + } - var wg sync.WaitGroup - bp := newBlockProcessor(s.peer, txn, col, dsKey, dagSyncer) - err = bp.processRemoteBlock(ctx, &wg, block) - if err != nil { - log.ErrorContextE( - ctx, - "Failed to process remote block", - err, - corelog.String("DocID", dsKey.DocID), - corelog.Any("CID", cid), - ) - } + bp := newBlockProcessor(ctx, s.peer) + err = bp.processRemoteBlock(ctx, block) + if err != nil { + log.ErrorContextE( + ctx, + "Failed to process remote block", + err, + corelog.String("DocID", docID.String()), + corelog.Any("CID", cid), + ) + } + bp.wg.Wait() + if s.peer.db.Events().DAGMerges.HasValue() { + wg := &sync.WaitGroup{} + wg.Add(1) + s.peer.db.Events().DAGMerges.Value().Publish(events.DAGMerge{ + Cid: cid, + SchemaRoot: string(req.Body.SchemaRoot), + Wg: wg, + }) wg.Wait() - bp.mergeBlocks(ctx) + } - err = s.syncIndexedDocs(ctx, col, docID) + // Once processed, subscribe to the DocID topic on the pubsub network unless we already + // suscribe to the collection. 
+ if !s.hasPubSubTopic(string(req.Body.SchemaRoot)) { + err = s.addPubSubTopic(docID.String(), true) if err != nil { return nil, err } - - // dagWorkers specific to the DocID will have been spawned within handleChildBlocks. - // Once we are done with the dag syncing process, we can get rid of those workers. - if s.peer.closeJob != nil { - s.peer.closeJob <- dsKey.DocID - } - - if txnErr = txn.Commit(ctx); txnErr != nil { - if errors.Is(txnErr, badger.ErrTxnConflict) { - continue - } - return &pb.PushLogReply{}, txnErr - } - - // Once processed, subscribe to the DocID topic on the pubsub network unless we already - // suscribe to the collection. - if !s.hasPubSubTopic(col.SchemaRoot()) { - err = s.addPubSubTopic(dsKey.DocID, true) - if err != nil { - return nil, err - } - } - return &pb.PushLogReply{}, nil - } - - return &pb.PushLogReply{}, client.NewErrMaxTxnRetries(txnErr) -} - -func (*server) getActiveCollection( - ctx context.Context, - store client.Store, - schemaRoot string, -) (client.Collection, error) { - cols, err := store.GetCollections( - ctx, - client.CollectionFetchOptions{ - SchemaRoot: immutable.Some(schemaRoot), - }, - ) - if err != nil { - return nil, errors.Wrap(fmt.Sprintf("Failed to get collection from schemaRoot %s", schemaRoot), err) - } - if len(cols) == 0 { - return nil, client.NewErrCollectionNotFoundForSchema(schemaRoot) - } - var col client.Collection - for _, c := range cols { - if col != nil && col.Name().HasValue() && !c.Name().HasValue() { - continue - } - col = c - } - return col, nil -} - -func (s *server) syncIndexedDocs( - ctx context.Context, - col client.Collection, - docID client.DocID, -) error { - // remove transaction from old context - oldCtx := db.SetContextTxn(ctx, nil) - - //TODO-ACP: https://github.com/sourcenetwork/defradb/issues/2365 - // Resolve while handling acp <> secondary indexes. 
- oldDoc, err := col.Get(oldCtx, docID, false) - isNewDoc := errors.Is(err, client.ErrDocumentNotFoundOrNotAuthorized) - if !isNewDoc && err != nil { - return err - } - - //TODO-ACP: https://github.com/sourcenetwork/defradb/issues/2365 - // Resolve while handling acp <> secondary indexes. - doc, err := col.Get(ctx, docID, false) - isDeletedDoc := errors.Is(err, client.ErrDocumentNotFoundOrNotAuthorized) - if !isDeletedDoc && err != nil { - return err - } - - if isDeletedDoc { - return col.DeleteDocIndex(oldCtx, oldDoc) - } else if isNewDoc { - return col.CreateDocIndex(ctx, doc) - } else { - return col.UpdateDocIndex(ctx, oldDoc, doc) } + return &pb.PushLogReply{}, nil } // GetHeadLog receives a get head log request diff --git a/net/server_test.go b/net/server_test.go index 93d7d8130f..47d6a68aa8 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -32,8 +32,8 @@ import ( func TestNewServerSimple(t *testing.T) { ctx := context.Background() - db, n := newTestNode(ctx, t) - _, err := newServer(n.Peer, db) + _, n := newTestNode(ctx, t) + _, err := newServer(n.Peer) require.NoError(t, err) } @@ -42,7 +42,7 @@ func TestNewServerWithDBClosed(t *testing.T) { db, n := newTestNode(ctx, t) db.Close() - _, err := newServer(n.Peer, db) + _, err := newServer(n.Peer) require.ErrorIs(t, err, memory.ErrClosed) } @@ -60,7 +60,8 @@ func TestNewServerWithGetAllCollectionError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) mDB := mockDBColError{db} - _, err := newServer(n.Peer, &mDB) + n.Peer.db = &mDB + _, err := newServer(n.Peer) require.ErrorIs(t, err, mockError) } @@ -80,7 +81,7 @@ func TestNewServerWithCollectionSubscribed(t *testing.T) { err = n.AddP2PCollections(ctx, []string{col.SchemaRoot()}) require.NoError(t, err) - _, err = newServer(n.Peer, db) + _, err = newServer(n.Peer) require.NoError(t, err) } @@ -118,8 +119,8 @@ func TestNewServerWithGetAllDocIDsError(t *testing.T) { require.NoError(t, err) mDB := mockDBDocIDsError{db} - - _, err = 
newServer(n.Peer, &mDB) + n.Peer.db = &mDB + _, err = newServer(n.Peer) require.ErrorIs(t, err, mockError) } @@ -145,7 +146,7 @@ func TestNewServerWithAddTopicError(t *testing.T) { _, err = rpc.NewTopic(ctx, n.Peer.ps, n.Peer.host.ID(), doc.ID().String(), true) require.NoError(t, err) - _, err = newServer(n.Peer, db) + _, err = newServer(n.Peer) require.ErrorContains(t, err, "topic already exists") } @@ -190,7 +191,7 @@ func TestNewServerWithEmitterError(t *testing.T) { n.Peer.host = &mockHost{n.Peer.host} - _, err = newServer(n.Peer, db) + _, err = newServer(n.Peer) require.NoError(t, err) } diff --git a/tests/clients/cli/wrapper_collection.go b/tests/clients/cli/wrapper_collection.go index 618d9491d2..62458dae99 100644 --- a/tests/clients/cli/wrapper_collection.go +++ b/tests/clients/cli/wrapper_collection.go @@ -377,15 +377,3 @@ func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, } return indexes, nil } - -func (c *Collection) CreateDocIndex(context.Context, *client.Document) error { - return ErrMethodIsNotImplemented -} - -func (c *Collection) UpdateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error { - return ErrMethodIsNotImplemented -} - -func (c *Collection) DeleteDocIndex(context.Context, *client.Document) error { - return ErrMethodIsNotImplemented -} diff --git a/tests/integration/db.go b/tests/integration/db.go index c473e4cdd0..1e8fe82731 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -105,6 +105,7 @@ func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, error) { func setupDatabase(s *state) (client.DB, string, error) { opts := []node.Option{ db.WithUpdateEvents(), + db.WithDAGMergeEvents(), node.WithLensPoolSize(lensPoolSize), // The test framework sets this up elsewhere when required so that it may be wrapped // into a [client.DB]. 
diff --git a/tests/integration/net/state/one_to_many/peer/with_create_update_test.go b/tests/integration/net/one_to_many/peer/with_create_update_test.go similarity index 100% rename from tests/integration/net/state/one_to_many/peer/with_create_update_test.go rename to tests/integration/net/one_to_many/peer/with_create_update_test.go diff --git a/tests/integration/net/state/one_to_many/replicator/with_create_test.go b/tests/integration/net/one_to_many/replicator/with_create_test.go similarity index 100% rename from tests/integration/net/state/one_to_many/replicator/with_create_test.go rename to tests/integration/net/one_to_many/replicator/with_create_test.go diff --git a/tests/integration/net/order/tcp_test.go b/tests/integration/net/order/tcp_test.go deleted file mode 100644 index ef18668d20..0000000000 --- a/tests/integration/net/order/tcp_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package order - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/net" - testUtils "github.com/sourcenetwork/defradb/tests/integration" -) - -// TestP2PWithSingleDocumentUpdatePerNode tests document syncing between two nodes with a single update per node -func TestP2PWithSingleDocumentUpdatePerNode(t *testing.T) { - test := P2PTestCase{ - NodeConfig: [][]net.NodeOpt{ - testUtils.RandomNetworkingConfig()(), - testUtils.RandomNetworkingConfig()(), - }, - NodePeers: map[int][]int{ - 1: { - 0, - }, - }, - SeedDocuments: []string{ - `{ - "Name": "John", - "Age": 21 - }`, - }, - Updates: map[int]map[int][]string{ - 1: { - 0: { - `{ - "Age": 45 - }`, - }, - }, - 0: { - 0: { - `{ - "Age": 60 - }`, - }, - }, - }, - Results: map[int]map[int]map[string]any{ - 0: { - 0: { - "Age": int64(45), - }, - }, - 1: { - 0: { - "Age": int64(60), - }, - }, - }, - } - - executeTestCase(t, test) -} - -// TestP2PWithMultipleDocumentUpdatesPerNode tests document syncing between two nodes with multiple updates per node. -func TestP2PWithMultipleDocumentUpdatesPerNode(t *testing.T) { - test := P2PTestCase{ - NodeConfig: [][]net.NodeOpt{ - testUtils.RandomNetworkingConfig()(), - testUtils.RandomNetworkingConfig()(), - }, - NodePeers: map[int][]int{ - 1: { - 0, - }, - }, - SeedDocuments: []string{ - `{ - "Name": "John", - "Age": 21 - }`, - }, - Updates: map[int]map[int][]string{ - 0: { - 0: { - `{ - "Age": 60 - }`, - `{ - "Age": 61 - }`, - `{ - "Age": 62 - }`, - }, - }, - 1: { - 0: { - `{ - "Age": 45 - }`, - `{ - "Age": 46 - }`, - `{ - "Age": 47 - }`, - }, - }, - }, - Results: map[int]map[int]map[string]any{ - 0: { - 0: { - "Age": int64(47), - }, - }, - 1: { - 0: { - "Age": int64(62), - }, - }, - }, - } - - executeTestCase(t, test) -} - -// TestP2FullPReplicator tests document syncing between a node and a replicator. 
-func TestP2FullPReplicator(t *testing.T) { - colDefMap, err := testUtils.ParseSDL(userCollectionGQLSchema) - require.NoError(t, err) - doc, err := client.NewDocFromJSON([]byte(`{ - "Name": "John", - "Age": 21 - }`), colDefMap[userCollection]) - require.NoError(t, err) - - test := P2PTestCase{ - NodeConfig: [][]net.NodeOpt{ - testUtils.RandomNetworkingConfig()(), - testUtils.RandomNetworkingConfig()(), - }, - NodeReplicators: map[int][]int{ - 0: { - 1, - }, - }, - DocumentsToReplicate: []*client.Document{ - doc, - }, - ReplicatorResult: map[int]map[string]map[string]any{ - 1: { - doc.ID().String(): { - "Age": int64(21), - }, - }, - }, - } - - executeTestCase(t, test) -} diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go deleted file mode 100644 index c7075dae22..0000000000 --- a/tests/integration/net/order/utils.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package order - -import ( - "context" - "fmt" - "testing" - - "github.com/sourcenetwork/corelog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/net" - netutils "github.com/sourcenetwork/defradb/net/utils" - testutils "github.com/sourcenetwork/defradb/tests/integration" -) - -var ( - log = corelog.NewLogger("test.net") -) - -const ( - userCollectionGQLSchema = ` - type Users { - Name: String - Email: String - Age: Int - Height: Float - Verified: Boolean - } - ` - - userCollection = "Users" -) - -type P2PTestCase struct { - Query string - - // The identity for all requests. - // TODO-ACP: https://github.com/sourcenetwork/defradb/issues/2366 - Improve in ACP <> P2P implementation - Identity string - - // Configuration parameters for each peer - NodeConfig [][]net.NodeOpt - - // List of peers for each net. - // Only peers with lower index than the node can be used in the list of peers. - NodePeers map[int][]int - - // List of replicators for each net. - // Only peers with lower index than the node can be used in the list of peers. 
- NodeReplicators map[int][]int - - SeedDocuments []string - DocumentsToReplicate []*client.Document - - // node/docID/values - Updates map[int]map[int][]string - Results map[int]map[int]map[string]any - ReplicatorResult map[int]map[string]map[string]any -} - -func setupDefraNode( - t *testing.T, - opts []net.NodeOpt, - peers []string, - seeds []string, -) (*net.Node, []client.DocID, error) { - ctx := context.Background() - - log.InfoContext(ctx, "Building new memory store") - db, err := testutils.NewBadgerMemoryDB(ctx) - if err != nil { - return nil, nil, err - } - - if err := seedSchema(ctx, db); err != nil { - return nil, nil, err - } - - // seed the database with a set of documents - docIDs := []client.DocID{} - for _, document := range seeds { - docID, err := seedDocument(ctx, db, document) - require.NoError(t, err) - docIDs = append(docIDs, docID) - } - - // init the P2P node - var n *net.Node - n, err = net.NewNode(ctx, db, opts...) - if err != nil { - return nil, nil, errors.Wrap("failed to start P2P node", err) - } - - // parse peers and bootstrap - if len(peers) != 0 { - log.InfoContext(ctx, "Parsing bootstrap peers", corelog.Any("Peers", peers)) - addrs, err := netutils.ParsePeers(peers) - if err != nil { - return nil, nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", peers), err) - } - log.InfoContext(ctx, "Bootstrapping with peers", corelog.Any("Addresses", addrs)) - n.Bootstrap(addrs) - } - - log.InfoContext(ctx, "Starting P2P node", corelog.Any("P2P addresses", n.PeerInfo().Addrs)) - if err := n.Start(); err != nil { - n.Close() - return nil, nil, errors.Wrap("unable to start P2P listeners", err) - } - - return n, docIDs, nil -} - -func seedSchema(ctx context.Context, db client.DB) error { - _, err := db.AddSchema(ctx, userCollectionGQLSchema) - return err -} - -func seedDocument( - ctx context.Context, - db client.DB, - document string, -) (client.DocID, error) { - col, err := db.GetCollectionByName(ctx, userCollection) - if err != 
nil { - return client.DocID{}, err - } - - doc, err := client.NewDocFromJSON([]byte(document), col.Definition()) - if err != nil { - return client.DocID{}, err - } - - err = col.Save(ctx, doc) - if err != nil { - return client.DocID{}, err - } - - return doc.ID(), nil -} - -func saveDocument( - ctx context.Context, - db client.DB, - document *client.Document, -) error { - col, err := db.GetCollectionByName(ctx, userCollection) - if err != nil { - return err - } - - return col.Save(ctx, document) -} - -func updateDocument( - ctx context.Context, - db client.DB, - docID client.DocID, - update string, -) error { - col, err := db.GetCollectionByName(ctx, userCollection) - if err != nil { - return err - } - - doc, err := getDocument(ctx, db, docID) - if err != nil { - return err - } - - if err := doc.SetWithJSON([]byte(update)); err != nil { - return err - } - - return col.Save(ctx, doc) -} - -func getDocument( - ctx context.Context, - db client.DB, - docID client.DocID, -) (*client.Document, error) { - col, err := db.GetCollectionByName(ctx, userCollection) - if err != nil { - return nil, err - } - - doc, err := col.Get(ctx, docID, false) - if err != nil { - return nil, err - } - return doc, err -} - -func executeTestCase(t *testing.T, test P2PTestCase) { - ctx := context.Background() - - docIDs := []client.DocID{} - nodes := []*net.Node{} - - for i, cfg := range test.NodeConfig { - log.InfoContext(ctx, fmt.Sprintf("Setting up node %d", i)) - var peerAddresses []string - if peers, ok := test.NodePeers[i]; ok { - for _, p := range peers { - if p >= len(nodes) { - log.InfoContext(ctx, "cannot set a peer that hasn't been started. 
Skipping to next peer") - continue - } - peerInfo := nodes[p].PeerInfo() - peerAddresses = append( - peerAddresses, - fmt.Sprintf("%s/p2p/%s", peerInfo.Addrs[0], peerInfo.ID), - ) - } - } - n, d, err := setupDefraNode( - t, - cfg, - peerAddresses, - test.SeedDocuments, - ) - require.NoError(t, err) - - if i == 0 { - docIDs = d - } - nodes = append(nodes, n) - } - - ////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////// - // PubSub related test logic - - // wait for peers to connect to each other - if len(test.NodePeers) > 0 { - for i, n := range nodes { - for j, p := range nodes { - if i == j { - continue - } - log.InfoContext(ctx, fmt.Sprintf("Waiting for node %d to connect with peer %d", i, j)) - err := n.WaitForPubSubEvent(p.PeerID()) - require.NoError(t, err) - log.InfoContext(ctx, fmt.Sprintf("Node %d connected to peer %d", i, j)) - } - } - } - - // update and sync peers - for n, updateMap := range test.Updates { - if n >= len(nodes) { - log.InfoContext(ctx, "cannot update a node that hasn't been started. Skipping to next node") - continue - } - - for d, updates := range updateMap { - for _, update := range updates { - log.InfoContext(ctx, fmt.Sprintf("Updating node %d with update %d", n, d)) - err := updateDocument( - ctx, - nodes[n].DB, - docIDs[d], - update, - ) - require.NoError(t, err) - - // wait for peers to sync - for n2, p := range nodes { - if n2 == n { - continue - } - log.InfoContext(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", n2, n)) - err := p.WaitForPushLogByPeerEvent(nodes[n].PeerInfo().ID) - require.NoError(t, err) - log.InfoContext(ctx, fmt.Sprintf("Node %d synced", n2)) - } - } - } - - // check that peers actually received the update - for n2, resultsMap := range test.Results { - if n2 == n { - continue - } - if n2 >= len(nodes) { - log.InfoContext(ctx, "cannot check results of a node that hasn't been started. 
Skipping to next node") - continue - } - - for d, results := range resultsMap { - for field, result := range results { - doc, err := getDocument( - ctx, - nodes[n2].DB, - docIDs[d], - ) - require.NoError(t, err) - - val, err := doc.Get(field) - require.NoError(t, err) - - assert.Equal(t, result, val) - } - } - } - } - - ////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////// - // Replicator related test logic - - if len(test.NodeReplicators) > 0 { - for i, n := range nodes { - if reps, ok := test.NodeReplicators[i]; ok { - for _, r := range reps { - err := n.Peer.SetReplicator(ctx, client.Replicator{ - Info: nodes[r].PeerInfo(), - }) - require.NoError(t, err) - } - } - } - } - - if len(test.DocumentsToReplicate) > 0 { - for n, reps := range test.NodeReplicators { - for _, doc := range test.DocumentsToReplicate { - err := saveDocument( - ctx, - nodes[n].DB, - doc, - ) - require.NoError(t, err) - } - for _, rep := range reps { - log.InfoContext(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", rep, n)) - err := nodes[rep].WaitForPushLogByPeerEvent(nodes[n].PeerID()) - require.NoError(t, err) - log.InfoContext(ctx, fmt.Sprintf("Node %d synced", rep)) - - for docID, results := range test.ReplicatorResult[rep] { - for field, result := range results { - d, err := client.NewDocIDFromString(docID) - require.NoError(t, err) - - doc, err := getDocument( - ctx, - nodes[rep].DB, - d, - ) - require.NoError(t, err) - - val, err := doc.Get(field) - require.NoError(t, err) - - assert.Equal(t, result, val) - } - } - } - } - } - - // clean up - for _, n := range nodes { - n.Close() - n.DB.Close() - } -} diff --git a/tests/integration/net/state/simple/peer/crdt/pcounter_test.go b/tests/integration/net/simple/peer/crdt/pcounter_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/crdt/pcounter_test.go rename to tests/integration/net/simple/peer/crdt/pcounter_test.go diff 
--git a/tests/integration/net/state/simple/peer/crdt/pncounter_test.go b/tests/integration/net/simple/peer/crdt/pncounter_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/crdt/pncounter_test.go rename to tests/integration/net/simple/peer/crdt/pncounter_test.go diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_get_remove_test.go b/tests/integration/net/simple/peer/subscribe/with_add_get_remove_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/subscribe/with_add_get_remove_test.go rename to tests/integration/net/simple/peer/subscribe/with_add_get_remove_test.go diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go b/tests/integration/net/simple/peer/subscribe/with_add_get_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go rename to tests/integration/net/simple/peer/subscribe/with_add_get_test.go diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go b/tests/integration/net/simple/peer/subscribe/with_add_remove_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go rename to tests/integration/net/simple/peer/subscribe/with_add_remove_test.go diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_test.go b/tests/integration/net/simple/peer/subscribe/with_add_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/subscribe/with_add_test.go rename to tests/integration/net/simple/peer/subscribe/with_add_test.go diff --git a/tests/integration/net/state/simple/peer/subscribe/with_get_test.go b/tests/integration/net/simple/peer/subscribe/with_get_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/subscribe/with_get_test.go rename to tests/integration/net/simple/peer/subscribe/with_get_test.go diff --git 
a/tests/integration/net/state/simple/peer/with_create_add_field_test.go b/tests/integration/net/simple/peer/with_create_add_field_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/with_create_add_field_test.go rename to tests/integration/net/simple/peer/with_create_add_field_test.go diff --git a/tests/integration/net/state/simple/peer/with_create_test.go b/tests/integration/net/simple/peer/with_create_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/with_create_test.go rename to tests/integration/net/simple/peer/with_create_test.go diff --git a/tests/integration/net/state/simple/peer/with_delete_test.go b/tests/integration/net/simple/peer/with_delete_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/with_delete_test.go rename to tests/integration/net/simple/peer/with_delete_test.go diff --git a/tests/integration/net/state/simple/peer/with_update_add_field_test.go b/tests/integration/net/simple/peer/with_update_add_field_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/with_update_add_field_test.go rename to tests/integration/net/simple/peer/with_update_add_field_test.go diff --git a/tests/integration/net/state/simple/peer/with_update_restart_test.go b/tests/integration/net/simple/peer/with_update_restart_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/with_update_restart_test.go rename to tests/integration/net/simple/peer/with_update_restart_test.go diff --git a/tests/integration/net/state/simple/peer/with_update_test.go b/tests/integration/net/simple/peer/with_update_test.go similarity index 100% rename from tests/integration/net/state/simple/peer/with_update_test.go rename to tests/integration/net/simple/peer/with_update_test.go diff --git a/tests/integration/net/state/simple/peer_replicator/crdt/pcounter_test.go b/tests/integration/net/simple/peer_replicator/crdt/pcounter_test.go similarity index 100% 
rename from tests/integration/net/state/simple/peer_replicator/crdt/pcounter_test.go rename to tests/integration/net/simple/peer_replicator/crdt/pcounter_test.go diff --git a/tests/integration/net/state/simple/peer_replicator/crdt/pncounter_test.go b/tests/integration/net/simple/peer_replicator/crdt/pncounter_test.go similarity index 100% rename from tests/integration/net/state/simple/peer_replicator/crdt/pncounter_test.go rename to tests/integration/net/simple/peer_replicator/crdt/pncounter_test.go diff --git a/tests/integration/net/state/simple/peer_replicator/with_create_test.go b/tests/integration/net/simple/peer_replicator/with_create_test.go similarity index 100% rename from tests/integration/net/state/simple/peer_replicator/with_create_test.go rename to tests/integration/net/simple/peer_replicator/with_create_test.go diff --git a/tests/integration/net/state/simple/peer_replicator/with_delete_test.go b/tests/integration/net/simple/peer_replicator/with_delete_test.go similarity index 100% rename from tests/integration/net/state/simple/peer_replicator/with_delete_test.go rename to tests/integration/net/simple/peer_replicator/with_delete_test.go diff --git a/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go b/tests/integration/net/simple/peer_replicator/with_update_restart_test.go similarity index 100% rename from tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go rename to tests/integration/net/simple/peer_replicator/with_update_restart_test.go diff --git a/tests/integration/net/state/simple/peer_replicator/with_update_test.go b/tests/integration/net/simple/peer_replicator/with_update_test.go similarity index 100% rename from tests/integration/net/state/simple/peer_replicator/with_update_test.go rename to tests/integration/net/simple/peer_replicator/with_update_test.go diff --git a/tests/integration/net/state/simple/replicator/crdt/pcounter_test.go 
b/tests/integration/net/simple/replicator/crdt/pcounter_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/crdt/pcounter_test.go rename to tests/integration/net/simple/replicator/crdt/pcounter_test.go diff --git a/tests/integration/net/state/simple/replicator/crdt/pncounter_test.go b/tests/integration/net/simple/replicator/crdt/pncounter_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/crdt/pncounter_test.go rename to tests/integration/net/simple/replicator/crdt/pncounter_test.go diff --git a/tests/integration/net/state/simple/replicator/with_create_add_field_test.go b/tests/integration/net/simple/replicator/with_create_add_field_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/with_create_add_field_test.go rename to tests/integration/net/simple/replicator/with_create_add_field_test.go diff --git a/tests/integration/net/state/simple/replicator/with_create_restart_test.go b/tests/integration/net/simple/replicator/with_create_restart_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/with_create_restart_test.go rename to tests/integration/net/simple/replicator/with_create_restart_test.go diff --git a/tests/integration/net/state/simple/replicator/with_create_test.go b/tests/integration/net/simple/replicator/with_create_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/with_create_test.go rename to tests/integration/net/simple/replicator/with_create_test.go diff --git a/tests/integration/net/state/simple/replicator/with_create_update_test.go b/tests/integration/net/simple/replicator/with_create_update_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/with_create_update_test.go rename to tests/integration/net/simple/replicator/with_create_update_test.go diff --git a/tests/integration/net/state/simple/replicator/with_delete_test.go 
b/tests/integration/net/simple/replicator/with_delete_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/with_delete_test.go rename to tests/integration/net/simple/replicator/with_delete_test.go diff --git a/tests/integration/net/state/simple/replicator/with_update_add_field_test.go b/tests/integration/net/simple/replicator/with_update_add_field_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/with_update_add_field_test.go rename to tests/integration/net/simple/replicator/with_update_add_field_test.go diff --git a/tests/integration/net/state/simple/replicator/with_update_test.go b/tests/integration/net/simple/replicator/with_update_test.go similarity index 100% rename from tests/integration/net/state/simple/replicator/with_update_test.go rename to tests/integration/net/simple/replicator/with_update_test.go diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go index 13f8307840..4b2d037d61 100644 --- a/tests/integration/query/commits/simple_test.go +++ b/tests/integration/query/commits/simple_test.go @@ -104,7 +104,7 @@ func TestQueryCommitsMultipleDocs(t *testing.T) { testUtils.ExecuteTestCase(t, test) } -func TestQueryCommitsWithSchemaVersionIdField(t *testing.T) { +func TestQueryCommitsWithSchemaVersionIDField(t *testing.T) { test := testUtils.TestCase{ Description: "Simple commits query yielding schemaVersionId", Actions: []any{ diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index fa1886304b..6d4922d9b8 100644 --- a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -110,7 +110,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { // This test is for documentation reasons only. This is not // desired behaviour (Users should not be specifying field ids). 
-func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionId(t *testing.T) { +func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionID(t *testing.T) { test := testUtils.TestCase{ Description: "Simple all commits query with docID and field id", Actions: []any{ diff --git a/tests/integration/query/latest_commits/with_doc_id_test.go b/tests/integration/query/latest_commits/with_doc_id_test.go index 726c009cf7..290dea175d 100644 --- a/tests/integration/query/latest_commits/with_doc_id_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_test.go @@ -56,7 +56,7 @@ func TestQueryLatestCommitsWithDocID(t *testing.T) { executeTestCase(t, test) } -func TestQueryLatestCommitsWithDocIDWithSchemaVersionIdField(t *testing.T) { +func TestQueryLatestCommitsWithDocIDWithSchemaVersionIDField(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple latest commits query with docID and schema versiion id field", Request: `query { diff --git a/tests/integration/query/simple/with_version_test.go b/tests/integration/query/simple/with_version_test.go index 5baf65a0ae..ea7ac76a2b 100644 --- a/tests/integration/query/simple/with_version_test.go +++ b/tests/integration/query/simple/with_version_test.go @@ -66,7 +66,7 @@ func TestQuerySimpleWithEmbeddedLatestCommit(t *testing.T) { executeTestCase(t, test) } -func TestQuerySimpleWithEmbeddedLatestCommitWithSchemaVersionId(t *testing.T) { +func TestQuerySimpleWithEmbeddedLatestCommitWithSchemaVersionID(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Embedded commits query within object query with schema version id", Request: `query { diff --git a/tests/integration/schema/updates/add/field/create_update_test.go b/tests/integration/schema/updates/add/field/create_update_test.go index d299b70e7f..cd3a0b1267 100644 --- a/tests/integration/schema/updates/add/field/create_update_test.go +++ b/tests/integration/schema/updates/add/field/create_update_test.go @@ -17,8 +17,8 @@ import ( ) 
func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoin(t *testing.T) { - initialSchemaVersionId := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - updatedSchemaVersionId := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" + initialSchemaVersionID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" + updatedSchemaVersionID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, version join", @@ -52,7 +52,7 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi "name": "John", "_version": []map[string]any{ { - "schemaVersionId": initialSchemaVersionId, + "schemaVersionId": initialSchemaVersionID, }, }, }, @@ -89,11 +89,11 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi "_version": []map[string]any{ { // Update commit - "schemaVersionId": updatedSchemaVersionId, + "schemaVersionId": updatedSchemaVersionID, }, { // Create commit - "schemaVersionId": initialSchemaVersionId, + "schemaVersionId": initialSchemaVersionID, }, }, }, @@ -105,8 +105,8 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi } func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuery(t *testing.T) { - initialSchemaVersionId := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - updatedSchemaVersionId := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" + initialSchemaVersionID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" + updatedSchemaVersionID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, commits query", @@ -147,11 +147,11 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuer Results: []map[string]any{ { 
// Update commit - "schemaVersionId": updatedSchemaVersionId, + "schemaVersionId": updatedSchemaVersionID, }, { // Create commit - "schemaVersionId": initialSchemaVersionId, + "schemaVersionId": initialSchemaVersionID, }, }, }, From 343b90ae3f2e8e9d66970cb00269f5ba54ff228d Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Fri, 7 Jun 2024 17:10:06 -0400 Subject: [PATCH 37/78] ci(i): Add check to keep table of contents up-to-date (#2693) ## Relevant issue(s) Resolves #2692 ## Description - Combine previous documentation workflows in one file - Add toc script tool - Add the toc make rule - Add toc job ## How has this been tested? - Using `act` tool - Manually in the commit which was reverted to ensure it fails if Readme is not updated. Action-run: https://github.com/sourcenetwork/defradb/actions/runs/9417618174/job/25943201802?pr=2693 Specify the platform(s) on which this was tested: - WSL2 --- .github/workflows/check-cli-documentation.yml | 52 -- .github/workflows/check-documentation.yml | 102 ++++ .../workflows/check-http-documentation.yml | 52 -- Makefile | 5 + README.md | 42 +- tools/scripts/md-toc/README.md | 443 ++++++++++++++++++ tools/scripts/md-toc/gh-md-toc | 421 +++++++++++++++++ 7 files changed, 992 insertions(+), 125 deletions(-) delete mode 100644 .github/workflows/check-cli-documentation.yml create mode 100644 .github/workflows/check-documentation.yml delete mode 100644 .github/workflows/check-http-documentation.yml create mode 100644 tools/scripts/md-toc/README.md create mode 100755 tools/scripts/md-toc/gh-md-toc diff --git a/.github/workflows/check-cli-documentation.yml b/.github/workflows/check-cli-documentation.yml deleted file mode 100644 index be50351518..0000000000 --- a/.github/workflows/check-cli-documentation.yml +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2024 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. 
-# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -# This workflow checks that all CLI documentation is up to date. -# If the documentation is not up to date then this action will fail. -name: Check CLI Documentation Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - check-cli-documentation: - name: Check cli documentation job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.21" - check-latest: true - - - name: Try generating cli documentation - run: make docs:cli - - - name: Check no new changes exist - uses: tj-actions/verify-changed-files@v20 - with: - fail-if-changed: true - files: | - docs/website/references/cli diff --git a/.github/workflows/check-documentation.yml b/.github/workflows/check-documentation.yml new file mode 100644 index 0000000000..97214d515b --- /dev/null +++ b/.github/workflows/check-documentation.yml @@ -0,0 +1,102 @@ +# Copyright 2024 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow checks that all documentation is up to date. +# If any documentation is not up to date then this action will fail. 
+name: Check Documentation Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + check-cli-documentation: + name: Check cli documentation job + + runs-on: ubuntu-latest + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + + - name: Try generating cli documentation + run: make docs:cli + + - name: Check no new changes exist + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true + files: | + docs/website/references/cli + + check-http-documentation: + name: Check http documentation job + + runs-on: ubuntu-latest + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + + - name: Try generating http documentation + run: make docs:http + + - name: Check no new changes exist + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true + files: | + docs/website/references/http + + check-readme-toc: + name: Check readme toc job + + runs-on: ubuntu-latest + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + + - name: Try generating readme toc + run: make toc + + - name: Check no new changes exist + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true + files: | + README.md diff --git a/.github/workflows/check-http-documentation.yml b/.github/workflows/check-http-documentation.yml deleted file mode 100644 index e68471c162..0000000000 --- a/.github/workflows/check-http-documentation.yml +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2024 Democratized Data Foundation -# -# 
Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -# This workflow checks that all HTTP documentation is up to date. -# If the documentation is not up to date then this action will fail. -name: Check HTTP Documentation Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - check-http-documentation: - name: Check http documentation job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.21" - check-latest: true - - - name: Try generating http documentation - run: make docs:http - - - name: Check no new changes exist - uses: tj-actions/verify-changed-files@v20 - with: - fail-if-changed: true - files: | - docs/website/references/http diff --git a/Makefile b/Makefile index c4c02da3d7..f4cfa7a7bd 100644 --- a/Makefile +++ b/Makefile @@ -357,6 +357,7 @@ docs: @$(MAKE) docs\:cli @$(MAKE) docs\:manpages @$(MAKE) docs\:http + @$(MAKE) toc .PHONY: docs\:cli docs\:cli: @@ -375,3 +376,7 @@ docs\:manpages: docs\:godoc: godoc -http=:6060 # open http://localhost:6060/pkg/github.com/sourcenetwork/defradb/ + +.PHONY: toc +toc: + bash tools/scripts/md-toc/gh-md-toc --insert --no-backup --hide-footer --skip-header README.md diff --git a/README.md b/README.md index 71e3ff856c..db0a3598b7 100644 --- a/README.md +++ b/README.md @@ -15,29 +15,29 @@ DefraDB is a user-centric database that prioritizes data ownership, personal pri Read the documentation on [docs.source.network](https://docs.source.network/). 
+ ## Table of Contents -- [Install](#install) -- [Key Management](#key-management) -- [Start](#start) -- [Configuration](#configuration) -- [External port binding](#external-port-binding) -- [Add a schema type](#add-a-schema-type) -- [Create a document instance](#create-a-document-instance) -- [Query documents](#query-documents) -- [Obtain document commits](#obtain-document-commits) -- [DefraDB Query Language (DQL)](#defradb-query-language-dql) -- [Peer-to-peer data synchronization](#peer-to-peer-data-synchronization) - - [Pubsub example](#pubsub-example) - - [Collection subscription example](#collection-subscription-example) - - [Replicator example](#replicator-example) -- [Securing the HTTP API with TLS](#securing-the-http-api-with-tls) -- [Access Control System](#access-control-system) -- [Supporting CORS](#supporting-cors) -- [Backing up and restoring](#backing-up-and-restoring) -- [Community](#community) -- [Licensing](#licensing) -- [Contributors](#contributors) + + * [Install](#install) + * [Key Management](#key-management) + * [Start](#start) + * [Configuration](#configuration) + * [External port binding](#external-port-binding) + * [Add a schema type](#add-a-schema-type) + * [Create a document](#create-a-document) + * [Query documents](#query-documents) + * [Obtain document commits](#obtain-document-commits) + * [DefraDB Query Language (DQL)](#defradb-query-language-dql) + * [Peer-to-peer data synchronization](#peer-to-peer-data-synchronization) + * [Securing the HTTP API with TLS](#securing-the-http-api-with-tls) + * [Access Control System](#access-control-system) + * [Supporting CORS](#supporting-cors) + * [Backing up and restoring](#backing-up-and-restoring) + * [Community](#community) + * [Licensing](#licensing) + * [Contributors](#contributors) + DISCLAIMER: At this early stage, DefraDB does not offer data encryption, and the default configuration exposes the database to the network. 
The software is provided "as is" and is not guaranteed to be stable, secure, or error-free. We encourage you to experiment with DefraDB and provide feedback, but please do not use it for production purposes until it has been thoroughly tested and developed. diff --git a/tools/scripts/md-toc/README.md b/tools/scripts/md-toc/README.md new file mode 100644 index 0000000000..bad07bc32f --- /dev/null +++ b/tools/scripts/md-toc/README.md @@ -0,0 +1,443 @@ +gh-md-toc +========= + +[![CI](https://github.com/ekalinin/github-markdown-toc/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/ekalinin/github-markdown-toc/actions/workflows/ci.yml) +![GitHub release (latest by date)](https://img.shields.io/github/v/release/ekalinin/github-markdown-toc) + +gh-md-toc — is for you if you **want to generate TOC** (Table Of Content) for a README.md or +a GitHub wiki page **without installing additional software**. + +It's my try to fix a problem: + + * [github/issues/215](https://github.com/isaacs/github/issues/215) + +gh-md-toc is able to process: + + * stdin + * local files (markdown files in local file system) + * remote files (html files on github.com) + +gh-md-toc tested on Ubuntu, and macOS High Sierra (gh-md-toc release 0.4.9). If you want it on Windows, you +better to use a golang based implementation: + + * [github-markdown-toc.go](https://github.com/ekalinin/github-markdown-toc.go) + +It's more solid, reliable and with ability of a parallel processing. And +absolutely without dependencies. 
+ +Table of contents +================= + + + * [Installation](#installation) + * [Usage](#usage) + * [STDIN](#stdin) + * [Local files](#local-files) + * [Remote files](#remote-files) + * [Multiple files](#multiple-files) + * [Combo](#combo) + * [Auto insert and update TOC](#auto-insert-and-update-toc) + * [GitHub token](#github-token) + * [TOC generation with Github Actions](#toc-generation-with-github-actions) + * [Tests](#tests) + * [Dependency](#dependency) + * [Docker](#docker) + * [Local](#local) + * [Public](#public) + + + +Installation +============ + +Linux (manual installation) +```bash +$ wget https://raw.githubusercontent.com/ekalinin/github-markdown-toc/master/gh-md-toc +$ chmod a+x gh-md-toc +``` + +MacOS (manual installation) +```bash +$ curl https://raw.githubusercontent.com/ekalinin/github-markdown-toc/master/gh-md-toc -o gh-md-toc +$ chmod a+x gh-md-toc +``` + +Linux or MacOS (using [Basher](https://github.com/basherpm/basher)) +```bash +$ basher install ekalinin/github-markdown-toc +# `gh-md-toc` will automatically be available in the PATH +``` + +Usage +===== + + +STDIN +----- + +Here's an example of TOC creating for markdown from STDIN: + +```bash +➥ cat ~/projects/Dockerfile.vim/README.md | ./gh-md-toc - + * [Dockerfile.vim](#dockerfilevim) + * [Screenshot](#screenshot) + * [Installation](#installation) + * [OR using Pathogen:](#or-using-pathogen) + * [OR using Vundle:](#or-using-vundle) + * [License](#license) +``` + +Local files +----------- + +Here's an example of TOC creating for a local README.md: + +```bash +➥ ./gh-md-toc ~/projects/Dockerfile.vim/README.md + + +Table of Contents +================= + + * [Dockerfile.vim](#dockerfilevim) + * [Screenshot](#screenshot) + * [Installation](#installation) + * [OR using Pathogen:](#or-using-pathogen) + * [OR using Vundle:](#or-using-vundle) + * [License](#license) +``` + +Remote files +------------ + +And here's an example, when you have a README.md like this: + + * [README.md without 
TOC](https://github.com/ekalinin/envirius/blob/f939d3b6882bfb6ecb28ef7b6e62862f934ba945/README.md) + +And you want to generate TOC for it. + +There is nothing easier: + +```bash +➥ ./gh-md-toc https://github.com/ekalinin/envirius/blob/master/README.md + +Table of Contents +================= + + * [envirius](#envirius) + * [Idea](#idea) + * [Features](#features) + * [Installation](#installation) + * [Uninstallation](#uninstallation) + * [Available plugins](#available-plugins) + * [Usage](#usage) + * [Check available plugins](#check-available-plugins) + * [Check available versions for each plugin](#check-available-versions-for-each-plugin) + * [Create an environment](#create-an-environment) + * [Activate/deactivate environment](#activatedeactivate-environment) + * [Activating in a new shell](#activating-in-a-new-shell) + * [Activating in the same shell](#activating-in-the-same-shell) + * [Get list of environments](#get-list-of-environments) + * [Get current activated environment](#get-current-activated-environment) + * [Do something in environment without enabling it](#do-something-in-environment-without-enabling-it) + * [Get help](#get-help) + * [Get help for a command](#get-help-for-a-command) + * [How to add a plugin?](#how-to-add-a-plugin) + * [Mandatory elements](#mandatory-elements) + * [plug_list_versions](#plug_list_versions) + * [plug_url_for_download](#plug_url_for_download) + * [plug_build](#plug_build) + * [Optional elements](#optional-elements) + * [Variables](#variables) + * [Functions](#functions) + * [Examples](#examples) + * [Example of the usage](#example-of-the-usage) + * [Dependencies](#dependencies) + * [Supported OS](#supported-os) + * [Tests](#tests) + * [Version History](#version-history) + * [License](#license) + * [README in another language](#readme-in-another-language) +``` + +That's all! Now all you need — is copy/paste result from console into original +README.md. 
+ +If you do not want to copy from console you can add `> YOURFILENAME.md` at the end of the command like `./gh-md-toc https://github.com/ekalinin/envirius/blob/master/README.md > table-of-contents.md` and this will store the table of contents to a file named table-of-contents.md in your current folder. + +And here is a result: + + * [README.md with TOC](https://github.com/ekalinin/envirius/blob/24ea3be0d3cc03f4235fa4879bb33dc122d0ae29/README.md) + +Moreover, it's able to work with GitHub's wiki pages: + +```bash +➥ ./gh-md-toc https://github.com/ekalinin/nodeenv/wiki/Who-Uses-Nodeenv + +Table of Contents +================= + + * [Who Uses Nodeenv?](#who-uses-nodeenv) + * [OpenStack](#openstack) + * [pre-commit.com](#pre-commitcom) +``` + +Multiple files +-------------- + +It supports multiple files as well: + +```bash +➥ ./gh-md-toc \ + https://github.com/aminb/rust-for-c/blob/master/hello_world/README.md \ + https://github.com/aminb/rust-for-c/blob/master/control_flow/README.md \ + https://github.com/aminb/rust-for-c/blob/master/primitive_types_and_operators/README.md \ + https://github.com/aminb/rust-for-c/blob/master/unique_pointers/README.md + + * [Hello world](https://github.com/aminb/rust-for-c/blob/master/hello_world/README.md#hello-world) + + * [Control Flow](https://github.com/aminb/rust-for-c/blob/master/control_flow/README.md#control-flow) + * [If](https://github.com/aminb/rust-for-c/blob/master/control_flow/README.md#if) + * [Loops](https://github.com/aminb/rust-for-c/blob/master/control_flow/README.md#loops) + * [For loops](https://github.com/aminb/rust-for-c/blob/master/control_flow/README.md#for-loops) + * [Switch/Match](https://github.com/aminb/rust-for-c/blob/master/control_flow/README.md#switchmatch) + * [Method call](https://github.com/aminb/rust-for-c/blob/master/control_flow/README.md#method-call) + + * [Primitive Types and 
Operators](https://github.com/aminb/rust-for-c/blob/master/primitive_types_and_operators/README.md#primitive-types-and-operators) + + * [Unique Pointers](https://github.com/aminb/rust-for-c/blob/master/unique_pointers/README.md#unique-pointers) +``` + +Combo +----- + +You can easily combine both ways: + +```bash +➥ ./gh-md-toc \ + ~/projects/Dockerfile.vim/README.md \ + https://github.com/ekalinin/sitemap.s/blob/master/README.md + + * [Dockerfile.vim](~/projects/Dockerfile.vim/README.md#dockerfilevim) + * [Screenshot](~/projects/Dockerfile.vim/README.md#screenshot) + * [Installation](~/projects/Dockerfile.vim/README.md#installation) + * [OR using Pathogen:](~/projects/Dockerfile.vim/README.md#or-using-pathogen) + * [OR using Vundle:](~/projects/Dockerfile.vim/README.md#or-using-vundle) + * [License](~/projects/Dockerfile.vim/README.md#license) + + * [sitemap.js](https://github.com/ekalinin/sitemap.js/blob/master/README.md#sitemapjs) + * [Installation](https://github.com/ekalinin/sitemap.js/blob/master/README.md#installation) + * [Usage](https://github.com/ekalinin/sitemap.js/blob/master/README.md#usage) + * [License](https://github.com/ekalinin/sitemap.js/blob/master/README.md#license) + + +``` + +Auto insert and update TOC +-------------------------- + +Just put into a file these two lines: + +``` + + +``` + +And run: + +```bash +$ ./gh-md-toc --insert README.test.md + +Table of Contents +================= + + * [gh-md-toc](#gh-md-toc) + * [Installation](#installation) + * [Usage](#usage) + * [STDIN](#stdin) + * [Local files](#local-files) + * [Remote files](#remote-files) + * [Multiple files](#multiple-files) + * [Combo](#combo) + * [Tests](#tests) + * [Dependency](#dependency) + +!! TOC was added into: 'README.test.md' +!! Origin version of the file: 'README.test.md.orig.2018-02-04_192655' +!! 
TOC added into a separate file: 'README.test.md.toc.2018-02-04_192655' + + + +``` + +Now check the same file: + +```bash +➜ grep -A15 "<\!\-\-ts" README.test.md + + * [gh-md-toc](#gh-md-toc) + * [Table of contents](#table-of-contents) + * [Installation](#installation) + * [Usage](#usage) + * [STDIN](#stdin) + * [Local files](#local-files) + * [Remote files](#remote-files) + * [Multiple files](#multiple-files) + * [Combo](#combo) + * [Auto insert and update TOC](#auto-insert-and-update-toc) + * [Tests](#tests) + * [Dependency](#dependency) + + + + +``` + +Next time when your file will be changed just repeat the command (`./gh-md-toc +--insert ...`) and TOC will be refreshed again. + +GitHub token +------------ + +All your tokens are [here](https://github.com/settings/tokens). + +You will need them if you get an error like this: + +``` +Parsing local markdown file requires access to github API +Error: You exceeded the hourly limit. See: https://developer.github.com/v3/#rate-limiting +or place github auth token here: ./token.txt +``` + +A token can be used as an env variable: + +```bash +➥ GH_TOC_TOKEN=2a2dab...563 ./gh-md-toc README.md + +Table of Contents +================= + +* [github\-markdown\-toc](#github-markdown-toc) +* [Table of Contents](#table-of-contents) +* [Installation](#installation) +* [Tests](#tests) +* [Usage](#usage) +* [LICENSE](#license) +``` + +Or from a file: + +```bash +➥ echo "2a2dab...563" > ./token.txt +➥ ./gh-md-toc README.md + +Table of Contents +================= + +* [github\-markdown\-toc](#github-markdown-toc) +* [Table of Contents](#table-of-contents) +* [Installation](#installation) +* [Tests](#tests) +* [Usage](#usage) +* [LICENSE](#license) +``` + +TOC generation with Github Actions +---------------------------------- + +Config: + +```yaml +on: + push: + branches: [main] + paths: ['foo.md'] + +jobs: + build: + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v2 + - run: | + curl 
https://raw.githubusercontent.com/ekalinin/github-markdown-toc/0.8.0/gh-md-toc -o gh-md-toc + chmod a+x gh-md-toc + ./gh-md-toc --insert --no-backup --hide-footer foo.md + rm gh-md-toc + - uses: stefanzweifel/git-auto-commit-action@v4 + with: + commit_message: Auto update markdown TOC +``` + +Tests +===== + +Done with [bats](https://github.com/bats-core/bats-core). +Useful articles: + + * https://www.engineyard.com/blog/how-to-use-bats-to-test-your-command-line-tools/ + * http://blog.spike.cx/post/60548255435/testing-bash-scripts-with-bats + + +How to run tests: + +```bash +➥ make test + + ✓ TOC for local README.md + ✓ TOC for remote README.md + ✓ TOC for mixed README.md (remote/local) + ✓ TOC for markdown from stdin + ✓ --help + ✓ --version + +6 tests, 0 failures +``` + +Dependency +========== + + * curl or wget + * awk (mawk is not tested) + * grep + * sed + * bats (for unit tests) + +Tested on Ubuntu 14.04/14.10 in bash/zsh. + +Docker +====== + +Local +----- + +* Build + +```shell +$ docker build -t markdown-toc-generator . +``` + +* Run on an URL + +```shell +$ docker run -it markdown-toc-generator https://github.com/ekalinin/envirius/blob/master/README.md +``` + +* Run on a local file (need to share volume with docker) + +```shell +$ docker run -it -v /data/ekalinin/envirius:/data markdown-toc-generator /data/README.md +``` + +Public +------- + +```shell +$ docker pull evkalinin/gh-md-toc:0.7.0 + +$ docker images | grep toc +evkalinin/gh-md-toc 0.7.0 0b8db6aed298 11 minutes ago 147MB + +$ docker run -it evkalinin/gh-md-toc:0.7.0 \ + https://github.com/ekalinin/envirius/blob/master/README.md +``` diff --git a/tools/scripts/md-toc/gh-md-toc b/tools/scripts/md-toc/gh-md-toc new file mode 100755 index 0000000000..35239bf5f8 --- /dev/null +++ b/tools/scripts/md-toc/gh-md-toc @@ -0,0 +1,421 @@ +#!/usr/bin/env bash + +# +# Steps: +# +# 1. Download corresponding html file for some README.md: +# curl -s $1 +# +# 2. 
Discard rows where no substring 'user-content-' (github's markup): +# awk '/user-content-/ { ... +# +# 3.1 Get last number in each row like ' ... sitemap.js.*<\/h/)+2, RLENGTH-5) +# +# 5. Find anchor and insert it inside "(...)": +# substr($0, match($0, "href=\"[^\"]+?\" ")+6, RLENGTH-8) +# + +gh_toc_version="0.10.0" + +gh_user_agent="gh-md-toc v$gh_toc_version" + +# +# Download rendered into html README.md by its url. +# +# +gh_toc_load() { + local gh_url=$1 + + if type curl &>/dev/null; then + curl --user-agent "$gh_user_agent" -s "$gh_url" + elif type wget &>/dev/null; then + wget --user-agent="$gh_user_agent" -qO- "$gh_url" + else + echo "Please, install 'curl' or 'wget' and try again." + exit 1 + fi +} + +# +# Converts local md file into html by GitHub +# +# -> curl -X POST --data '{"text": "Hello world github/linguist#1 **cool**, and #1!"}' https://api.github.com/markdown +#

Hello world github/linguist#1 cool, and #1!

'" +gh_toc_md2html() { + local gh_file_md=$1 + local skip_header=$2 + + URL=https://api.github.com/markdown/raw + + if [ -n "$GH_TOC_TOKEN" ]; then + TOKEN=$GH_TOC_TOKEN + else + TOKEN_FILE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/token.txt" + if [ -f "$TOKEN_FILE" ]; then + TOKEN="$(cat "$TOKEN_FILE")" + fi + fi + if [ -n "${TOKEN}" ]; then + AUTHORIZATION="Authorization: token ${TOKEN}" + fi + + local gh_tmp_file_md=$gh_file_md + if [ "$skip_header" = "yes" ]; then + if grep -Fxq "" "$gh_src"; then + # cut everything before the toc + gh_tmp_file_md=$gh_file_md~~ + sed '1,//d' "$gh_file_md" > "$gh_tmp_file_md" + fi + fi + + # echo $URL 1>&2 + OUTPUT=$(curl -s \ + --user-agent "$gh_user_agent" \ + --data-binary @"$gh_tmp_file_md" \ + -H "Content-Type:text/plain" \ + -H "$AUTHORIZATION" \ + "$URL") + + rm -f "${gh_file_md}~~" + + if [ "$?" != "0" ]; then + echo "XXNetworkErrorXX" + fi + if [ "$(echo "${OUTPUT}" | awk '/API rate limit exceeded/')" != "" ]; then + echo "XXRateLimitXX" + else + echo "${OUTPUT}" + fi +} + + +# +# Is passed string url +# +gh_is_url() { + case $1 in + https* | http*) + echo "yes";; + *) + echo "no";; + esac +} + +# +# TOC generator +# +gh_toc(){ + local gh_src=$1 + local gh_src_copy=$1 + local gh_ttl_docs=$2 + local need_replace=$3 + local no_backup=$4 + local no_footer=$5 + local indent=$6 + local skip_header=$7 + + if [ "$gh_src" = "" ]; then + echo "Please, enter URL or local path for a README.md" + exit 1 + fi + + + # Show "TOC" string only if working with one document + if [ "$gh_ttl_docs" = "1" ]; then + + echo "Table of Contents" + echo "=================" + echo "" + gh_src_copy="" + + fi + + if [ "$(gh_is_url "$gh_src")" == "yes" ]; then + gh_toc_load "$gh_src" | gh_toc_grab "$gh_src_copy" "$indent" + if [ "${PIPESTATUS[0]}" != "0" ]; then + echo "Could not load remote document." + echo "Please check your url or network connectivity" + exit 1 + fi + if [ "$need_replace" = "yes" ]; then + echo + echo "!! 
'$gh_src' is not a local file" + echo "!! Can't insert the TOC into it." + echo + fi + else + local rawhtml + rawhtml=$(gh_toc_md2html "$gh_src" "$skip_header") + if [ "$rawhtml" == "XXNetworkErrorXX" ]; then + echo "Parsing local markdown file requires access to github API" + echo "Please make sure curl is installed and check your network connectivity" + exit 1 + fi + if [ "$rawhtml" == "XXRateLimitXX" ]; then + echo "Parsing local markdown file requires access to github API" + echo "Error: You exceeded the hourly limit. See: https://developer.github.com/v3/#rate-limiting" + TOKEN_FILE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/token.txt" + echo "or place GitHub auth token here: ${TOKEN_FILE}" + exit 1 + fi + local toc + toc=`echo "$rawhtml" | gh_toc_grab "$gh_src_copy" "$indent"` + echo "$toc" + if [ "$need_replace" = "yes" ]; then + if grep -Fxq "" "$gh_src" && grep -Fxq "" "$gh_src"; then + echo "Found markers" + else + echo "You don't have or in your file...exiting" + exit 1 + fi + local ts="<\!--ts-->" + local te="<\!--te-->" + local dt + dt=$(date +'%F_%H%M%S') + local ext=".orig.${dt}" + local toc_path="${gh_src}.toc.${dt}" + local toc_createdby="" + local toc_footer + toc_footer="" + # http://fahdshariff.blogspot.ru/2012/12/sed-mutli-line-replacement-between-two.html + # clear old TOC + sed -i"${ext}" "/${ts}/,/${te}/{//!d;}" "$gh_src" + # create toc file + echo "${toc}" > "${toc_path}" + if [ "${no_footer}" != "yes" ]; then + echo -e "\n${toc_createdby}\n${toc_footer}\n" >> "$toc_path" + fi + + # insert toc file + if ! sed --version > /dev/null 2>&1; then + sed -i "" "/${ts}/r ${toc_path}" "$gh_src" + else + sed -i "/${ts}/r ${toc_path}" "$gh_src" + fi + echo + if [ "${no_backup}" = "yes" ]; then + rm "$toc_path" "$gh_src$ext" + fi + echo "!! TOC was added into: '$gh_src'" + if [ -z "${no_backup}" ]; then + echo "!! Origin version of the file: '${gh_src}${ext}'" + echo "!! 
TOC added into a separate file: '${toc_path}'" + fi + echo + fi + fi +} + +# +# Grabber of the TOC from rendered html +# +# $1 - a source url of document. +# It's need if TOC is generated for multiple documents. +# $2 - number of spaces used to indent. +# +gh_toc_grab() { + + href_regex="/href=\"[^\"]+?\"/" + common_awk_script=' + modified_href = "" + split(href, chars, "") + for (i=1;i <= length(href); i++) { + c = chars[i] + res = "" + if (c == "+") { + res = " " + } else { + if (c == "%") { + res = "\\x" + } else { + res = c "" + } + } + modified_href = modified_href res + } + print sprintf("%*s", (level-1)*'"$2"', "") "* [" text "](" gh_url modified_href ")" + ' + if [ "`uname -s`" == "OS/390" ]; then + grepcmd="pcregrep -o" + echoargs="" + awkscript='{ + level = substr($0, 3, 1) + text = substr($0, match($0, /<\/span><\/a>[^<]*<\/h/)+11, RLENGTH-14) + href = substr($0, match($0, '$href_regex')+6, RLENGTH-7) + '"$common_awk_script"' + }' + else + grepcmd="grep -Eo" + echoargs="-e" + awkscript='{ + level = substr($0, 3, 1) + text = substr($0, match($0, /">.*<\/h/)+2, RLENGTH-5) + href = substr($0, match($0, '$href_regex')+6, RLENGTH-7) + '"$common_awk_script"' + }' + fi + + # if closed is on the new line, then move it on the prev line + # for example: + # was: The command foo1 + # + # became: The command foo1 + sed -e ':a' -e 'N' -e '$!ba' -e 's/\n<\/h/<\/h/g' | + + # Sometimes a line can start with . Fix that. + sed -e ':a' -e 'N' -e '$!ba' -e 's/\n//g' | sed 's/<\/code>//g' | + + # remove g-emoji + sed 's/]*[^<]*<\/g-emoji> //g' | + + # now all rows are like: + #

title

.. + # format result line + # * $0 - whole string + # * last element of each row: "/dev/null; then + $tool --version | head -n 1 + else + echo "not installed" + fi + done +} + +show_help() { + local app_name + app_name=$(basename "$0") + echo "GitHub TOC generator ($app_name): $gh_toc_version" + echo "" + echo "Usage:" + echo " $app_name [options] src [src] Create TOC for a README file (url or local path)" + echo " $app_name - Create TOC for markdown from STDIN" + echo " $app_name --help Show help" + echo " $app_name --version Show version" + echo "" + echo "Options:" + echo " --indent Set indent size. Default: 3." + echo " --insert Insert new TOC into original file. For local files only. Default: false." + echo " See https://github.com/ekalinin/github-markdown-toc/issues/41 for details." + echo " --no-backup Remove backup file. Set --insert as well. Default: false." + echo " --hide-footer Do not write date & author of the last TOC update. Set --insert as well. Default: false." + echo " --skip-header Hide entry of the topmost headlines. Default: false." + echo " See https://github.com/ekalinin/github-markdown-toc/issues/125 for details." + echo "" +} + +# +# Options handlers +# +gh_toc_app() { + local need_replace="no" + local indent=3 + + if [ "$1" = '--help' ] || [ $# -eq 0 ] ; then + show_help + return + fi + + if [ "$1" = '--version' ]; then + show_version + return + fi + + if [ "$1" = '--indent' ]; then + indent="$2" + shift 2 + fi + + if [ "$1" = "-" ]; then + if [ -z "$TMPDIR" ]; then + TMPDIR="/tmp" + elif [ -n "$TMPDIR" ] && [ ! 
-d "$TMPDIR" ]; then + mkdir -p "$TMPDIR" + fi + local gh_tmp_md + if [ "`uname -s`" == "OS/390" ]; then + local timestamp + timestamp=$(date +%m%d%Y%H%M%S) + gh_tmp_md="$TMPDIR/tmp.$timestamp" + else + gh_tmp_md=$(mktemp "$TMPDIR/tmp.XXXXXX") + fi + while read -r input; do + echo "$input" >> "$gh_tmp_md" + done + gh_toc_md2html "$gh_tmp_md" | gh_toc_grab "" "$indent" + return + fi + + if [ "$1" = '--insert' ]; then + need_replace="yes" + shift + fi + + if [ "$1" = '--no-backup' ]; then + need_replace="yes" + no_backup="yes" + shift + fi + + if [ "$1" = '--hide-footer' ]; then + need_replace="yes" + no_footer="yes" + shift + fi + + if [ "$1" = '--skip-header' ]; then + skip_header="yes" + shift + fi + + + for md in "$@" + do + echo "" + gh_toc "$md" "$#" "$need_replace" "$no_backup" "$no_footer" "$indent" "$skip_header" + done + + echo "" + echo "" +} + +# +# Entry point +# +gh_toc_app "$@" From 067e378b0690d955293a93eef741d4b004dbc2c3 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Mon, 10 Jun 2024 12:18:05 -0400 Subject: [PATCH 38/78] ci(i): Add action to ensure tidyness (#2697) ## Relevant issue(s) Resolves ##2695 ## Description ### Backstory - A commit (c42e7ee6) broke mod tidy command recently - Which was fixed in the next commit in this PR: https://github.com/sourcenetwork/defradb/pull/2548 - Some head scratching was done to find out that mod tidy needed a minimum version, hence the current pinned mod tidy There are 2 things being checked within the new added action: 1) That the backstory doesn't happen again (`make tidy` isn't broken). 2) `make tidy` is ran and up to date. NOTE: `(2)` above I am not sure if we want to keep, or not I would be okay with just `(1)`. However here are some reasons why you would want `(2)`: - always be in `tidy state` - one less command to do at release stage. ## How has this been tested? 
- Action that failed when `make tidy` was broken: https://github.com/sourcenetwork/defradb/actions/runs/9422665720/job/25959440810?pr=2697 - Action that failed when `make tidy` was not up to date (i.e. not tidy): https://github.com/sourcenetwork/defradb/actions/runs/9422726089/job/25959622978?pr=2697 Specify the platform(s) on which this was tested: - WSL2 --- .github/workflows/check-tidy.yml | 59 ++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 .github/workflows/check-tidy.yml diff --git a/.github/workflows/check-tidy.yml b/.github/workflows/check-tidy.yml new file mode 100644 index 0000000000..979052cb5b --- /dev/null +++ b/.github/workflows/check-tidy.yml @@ -0,0 +1,59 @@ +# Copyright 2024 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow checks that go mod tidy command we have set for the specific +# go version is not broken, for example `go mod tidy -go=1.21.3`. This +# can cause some head scratching at times, so better catch this in the PR. +# +# Inaddition to that also checks that we are currently in a `tidy` state. +name: Check Tidy Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + check-tidy: + name: Check mod tidy job + + runs-on: ubuntu-latest + + steps: + + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.21" + check-latest: true + + # This checks mod tidy is not broken. + - name: Check mod tidy + run: make tidy + + # This checks mod tidy is up to date. 
+ - name: Check no new changes exist + uses: tj-actions/verify-changed-files@v20 + with: + fail-if-changed: true + files: | + go.mod + go.sum From efef0c93828ca2b14ad3bbd767731a1c029cf5fa Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 13:25:47 -0400 Subject: [PATCH 39/78] bot: Update dependencies (bulk dependabot PRs) 06-10-2024 (#2705) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by combining the following PRs: #2704 bot: Bump github.com/zalando/go-keyring from 0.2.4 to 0.2.5 #2703 bot: Bump github.com/cosmos/cosmos-sdk from 0.50.6 to 0.50.7 #2702 bot: Bump vite from 5.2.12 to 5.2.13 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- playground/package-lock.json | 8 ++++---- playground/package.json | 2 +- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/go.mod b/go.mod index f9bac2daa8..a2701648d5 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.3 require ( github.com/bits-and-blooms/bitset v1.13.0 github.com/bxcodec/faker v2.0.1+incompatible - github.com/cosmos/cosmos-sdk v0.50.6 + github.com/cosmos/cosmos-sdk v0.50.7 github.com/cosmos/gogoproto v1.4.12 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/evanphx/json-patch/v5 v5.9.0 @@ -49,7 +49,7 @@ require ( github.com/tidwall/btree v1.7.0 github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 - github.com/zalando/go-keyring v0.2.4 + github.com/zalando/go-keyring v0.2.5 go.opentelemetry.io/otel/metric v1.27.0 go.opentelemetry.io/otel/sdk/metric v1.27.0 go.uber.org/zap v1.27.0 @@ -61,7 +61,7 @@ require ( require ( 
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.31.0-20230802163732-1c33ebd9ecfa.1 // indirect - cosmossdk.io/api v0.7.4 // indirect + cosmossdk.io/api v0.7.5 // indirect cosmossdk.io/collections v0.4.0 // indirect cosmossdk.io/core v0.11.0 // indirect cosmossdk.io/depinject v1.0.0-alpha.4 // indirect @@ -69,7 +69,7 @@ require ( cosmossdk.io/log v1.3.1 // indirect cosmossdk.io/math v1.3.0 // indirect cosmossdk.io/store v1.1.0 // indirect - cosmossdk.io/x/tx v0.13.2 // indirect + cosmossdk.io/x/tx v0.13.3 // indirect filippo.io/edwards25519 v1.0.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect @@ -94,7 +94,7 @@ require ( github.com/cockroachdb/pebble v1.1.0 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/cometbft/cometbft v0.38.6 // indirect + github.com/cometbft/cometbft v0.38.7 // indirect github.com/cometbft/cometbft-db v0.9.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect diff --git a/go.sum b/go.sum index e6246a269d..13d7dc3838 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cosmossdk.io/api v0.7.4 h1:sPo8wKwCty1lht8kgL3J7YL1voJywP3YWuA5JKkBz30= -cosmossdk.io/api v0.7.4/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= +cosmossdk.io/api v0.7.5 h1:eMPTReoNmGUm8DeiQL9DyM8sYDjEhWzL1+nLbI9DqtQ= +cosmossdk.io/api v0.7.5/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= 
cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= cosmossdk.io/core v0.11.0 h1:vtIafqUi+1ZNAE/oxLOQQ7Oek2n4S48SWLG8h/+wdbo= @@ -20,8 +20,8 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/store v1.1.0 h1:LnKwgYMc9BInn9PhpTFEQVbL9UK475G2H911CGGnWHk= cosmossdk.io/store v1.1.0/go.mod h1:oZfW/4Fc/zYqu3JmQcQdUJ3fqu5vnYTn3LZFFy8P8ng= -cosmossdk.io/x/tx v0.13.2 h1:Kh90UH30bhnnUdJH+CmWLyaH8IKdY6BBGY3EkdOk82o= -cosmossdk.io/x/tx v0.13.2/go.mod h1:yhPokDCfXVIuAtyp49IFlWB5YAXUgD7Zek+ZHwsHzvU= +cosmossdk.io/x/tx v0.13.3 h1:Ha4mNaHmxBc6RMun9aKuqul8yHiL78EKJQ8g23Zf73g= +cosmossdk.io/x/tx v0.13.3/go.mod h1:I8xaHv0rhUdIvIdptKIqzYy27+n2+zBVaxO6fscFhys= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= @@ -170,8 +170,8 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/cometbft/cometbft v0.38.6 h1:QSgpCzrGWJ2KUq1qpw+FCfASRpE27T6LQbfEHscdyOk= -github.com/cometbft/cometbft v0.38.6/go.mod h1:8rSPxzUJYquCN8uuBgbUHOMg2KAwvr7CyUw+6ukO4nw= +github.com/cometbft/cometbft v0.38.7 h1:ULhIOJ9+LgSy6nLekhq9ae3juX3NnQUMMPyVdhZV6Hk= +github.com/cometbft/cometbft v0.38.7/go.mod h1:HIyf811dFMI73IE0F7RrnY/Fr+d1+HuJAgtkEpQjCMY= 
github.com/cometbft/cometbft-db v0.9.1 h1:MIhVX5ja5bXNHF8EYrThkG9F7r9kSfv8BX4LWaxWJ4M= github.com/cometbft/cometbft-db v0.9.1/go.mod h1:iliyWaoV0mRwBJoizElCwwRA9Tf7jZJOURcRZF9m60U= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= @@ -194,8 +194,8 @@ github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAK github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.50.6 h1:efR3MsvMHX5sxS3be+hOobGk87IzlZbSpsI2x/Vw3hk= -github.com/cosmos/cosmos-sdk v0.50.6/go.mod h1:lVkRY6cdMJ0fG3gp8y4hFrsKZqF4z7y0M2UXFb9Yt40= +github.com/cosmos/cosmos-sdk v0.50.7 h1:LsBGKxifENR/DN4E1RZaitsyL93HU44x0p8EnMHp4V4= +github.com/cosmos/cosmos-sdk v0.50.7/go.mod h1:84xDDJEHttRT7NDGwBaUOLVOMN0JNE9x7NbsYIxXs1s= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -1191,8 +1191,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zalando/go-keyring v0.2.4 h1:wi2xxTqdiwMKbM6TWwi+uJCG/Tum2UV0jqaQhCa9/68= -github.com/zalando/go-keyring v0.2.4/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= +github.com/zalando/go-keyring v0.2.5 h1:Bc2HHpjALryKD62ppdEzaFG6VxL6Bc+5v0LYpN8Lba8= +github.com/zalando/go-keyring v0.2.5/go.mod 
h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= diff --git a/playground/package-lock.json b/playground/package-lock.json index 8e23698c2c..e5a7db5d6e 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -25,7 +25,7 @@ "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.12" + "vite": "^5.2.13" } }, "node_modules/@babel/runtime": { @@ -5939,9 +5939,9 @@ "optional": true }, "node_modules/vite": { - "version": "5.2.12", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.12.tgz", - "integrity": "sha512-/gC8GxzxMK5ntBwb48pR32GGhENnjtY30G4A0jemunsBkiEZFw60s8InGpN8gkhHEkjnRK1aSAxeQgwvFhUHAA==", + "version": "5.2.13", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.13.tgz", + "integrity": "sha512-SSq1noJfY9pR3I1TUENL3rQYDQCFqgD+lM6fTRAM8Nv6Lsg5hDLaXkjETVeBt+7vZBCMoibD+6IWnT2mJ+Zb/A==", "dev": true, "dependencies": { "esbuild": "^0.20.1", diff --git a/playground/package.json b/playground/package.json index ab4b99b625..9eb354177e 100644 --- a/playground/package.json +++ b/playground/package.json @@ -27,6 +27,6 @@ "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.12" + "vite": "^5.2.13" } } From ea68087e4bdd66997af9c356eebeece0bcf29cdc Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Mon, 10 Jun 2024 13:43:33 -0400 Subject: [PATCH 40/78] fix: Incorporate schema root into docID (#2701) ## Relevant issue(s) Resolves #2688 ## Description Incorporates schema root into docID. 
--- client/document.go | 5 + .../i2688-doc-id-includes-schema.md | 3 + docs/website/guides/schema-relationship.md | 2 +- docs/website/guides/time-traveling-queries.md | 2 +- .../query-specification/database-api.md | 2 +- internal/db/backup_test.go | 32 +- .../acp/index/query_with_relation_test.go | 32 +- tests/integration/acp/query/fixture.go | 48 +- .../acp/query/relation_objects_test.go | 24 +- .../acp/register_and_delete_test.go | 4 +- .../integration/acp/register_and_read_test.go | 6 +- .../acp/register_and_update_test.go | 14 +- .../backup/one_to_many/export_test.go | 21 +- .../backup/one_to_many/import_test.go | 40 +- .../backup/one_to_one/export_test.go | 25 +- .../backup/one_to_one/import_test.go | 50 +- .../backup/self_reference/export_test.go | 8 +- .../backup/self_reference/import_test.go | 46 +- .../integration/backup/simple/export_test.go | 6 +- .../integration/backup/simple/import_test.go | 14 +- .../collection/update/simple/utils.go | 3 + .../events/simple/with_create_txn_test.go | 2 +- .../integration/explain/debug/dagscan_test.go | 8 +- .../explain/default/dagscan_test.go | 24 +- .../explain/execute/dagscan_test.go | 4 +- .../explain/execute/delete_test.go | 2 +- tests/integration/explain/execute/fixture.go | 83 +- .../execute/query_deleted_docs_test.go | 4 +- .../explain/execute/type_join_test.go | 9 +- .../explain/execute/update_test.go | 4 +- .../explain/execute/with_average_test.go | 9 +- .../explain/execute/with_count_test.go | 5 +- .../explain/execute/with_limit_test.go | 12 +- .../explain/execute/with_order_test.go | 15 +- .../explain/execute/with_sum_test.go | 9 +- tests/integration/index/create_drop_test.go | 2 +- tests/integration/index/create_test.go | 4 +- .../index/create_unique_composite_test.go | 4 +- tests/integration/index/create_unique_test.go | 2 +- tests/integration/index/drop_test.go | 2 +- ...y_with_composite_index_only_filter_test.go | 6 +- ...uery_with_compound_filter_relation_test.go | 12 +- 
.../index/query_with_relation_filter_test.go | 98 +- ...with_unique_composite_index_filter_test.go | 8 +- ...uery_with_unique_index_only_filter_test.go | 2 +- .../mutation/create/crdt/pcounter_test.go | 2 +- .../mutation/create/crdt/pncounter_test.go | 2 +- .../field_kinds/field_kind_json_test.go | 2 +- .../one_to_many/with_alias_test.go | 8 +- .../one_to_many/with_simple_test.go | 6 +- .../field_kinds/one_to_one/with_alias_test.go | 33 +- .../one_to_one/with_simple_test.go | 77 +- .../one_to_one_to_one/with_txn_test.go | 70 +- .../mutation/create/simple_test.go | 4 +- .../mutation/create/with_version_test.go | 2 +- .../one_to_many/with_show_deleted_test.go | 63 +- .../one_to_one_to_one/with_id_test.go | 42 +- .../one_to_one_to_one/with_txn_test.go | 74 +- .../delete/with_deleted_field_test.go | 4 +- .../mutation/delete/with_filter_test.go | 4 +- .../mutation/delete/with_id_alias_test.go | 4 +- .../mutation/delete/with_id_test.go | 4 +- .../mutation/delete/with_id_txn_test.go | 4 +- .../mutation/delete/with_ids_alias_test.go | 6 +- .../mutation/delete/with_ids_filter_test.go | 4 +- .../mutation/delete/with_ids_test.go | 14 +- .../mutation/delete/with_ids_txn_test.go | 6 +- .../delete/with_ids_update_alias_test.go | 6 +- .../integration/mutation/mix/with_txn_test.go | 26 +- .../field_kinds/one_to_many/simple_test.go | 20 +- .../one_to_many/with_alias_test.go | 24 +- .../field_kinds/one_to_one/with_alias_test.go | 16 +- .../one_to_one/with_self_ref_test.go | 20 +- .../one_to_one/with_simple_test.go | 22 +- .../mutation/update/with_id_test.go | 6 +- .../mutation/update/with_ids_test.go | 10 +- .../peer/with_create_update_test.go | 6 +- .../replicator/with_create_test.go | 2 +- .../net/simple/peer/with_create_test.go | 16 +- .../net/simple/peer/with_delete_test.go | 48 +- .../peer_replicator/with_create_test.go | 8 +- .../net/simple/replicator/with_create_test.go | 12 +- .../integration/query/commits/simple_test.go | 54 +- .../query/commits/with_cid_test.go | 8 +- 
.../commits/with_collectionid_prop_test.go | 8 +- .../query/commits/with_depth_test.go | 34 +- .../query/commits/with_doc_id_cid_test.go | 6 +- .../query/commits/with_doc_id_count_test.go | 8 +- .../query/commits/with_doc_id_field_test.go | 14 +- .../commits/with_doc_id_group_order_test.go | 4 +- .../commits/with_doc_id_limit_offset_test.go | 6 +- .../query/commits/with_doc_id_limit_test.go | 6 +- .../with_doc_id_order_limit_offset_test.go | 6 +- .../query/commits/with_doc_id_order_test.go | 76 +- .../query/commits/with_doc_id_prop_test.go | 6 +- .../query/commits/with_doc_id_test.go | 54 +- .../commits/with_doc_id_typename_test.go | 8 +- .../query/commits/with_field_test.go | 6 +- .../query/commits/with_group_test.go | 20 +- .../query/inline_array/with_group_test.go | 16 +- .../with_collectionid_prop_test.go | 4 +- .../latest_commits/with_doc_id_field_test.go | 14 +- .../latest_commits/with_doc_id_prop_test.go | 4 +- .../query/latest_commits/with_doc_id_test.go | 12 +- .../query/one_to_many/simple_test.go | 40 +- .../query/one_to_many/with_cid_doc_id_test.go | 48 +- .../one_to_many/with_count_filter_test.go | 52 +- .../with_count_limit_offset_test.go | 80 +- .../one_to_many/with_count_limit_test.go | 66 +- .../query/one_to_many/with_count_test.go | 20 +- .../query/one_to_many/with_doc_id_test.go | 10 +- .../query/one_to_many/with_doc_ids_test.go | 16 +- .../with_filter_related_id_test.go | 114 +- .../query/one_to_many/with_filter_test.go | 159 ++- .../one_to_many/with_group_filter_test.go | 76 +- .../with_group_related_id_alias_test.go | 1079 +++++++++-------- .../one_to_many/with_group_related_id_test.go | 174 ++- .../query/one_to_many/with_group_test.go | 182 ++- .../query/one_to_many/with_limit_test.go | 64 +- .../with_order_filter_limit_test.go | 24 +- .../one_to_many/with_order_filter_test.go | 42 +- .../query/one_to_many/with_related_id_test.go | 62 +- .../one_to_many/with_same_field_name_test.go | 12 +- .../one_to_many/with_sum_filter_order_test.go | 160 
+-- .../with_sum_limit_offset_order_test.go | 768 +++++++----- .../one_to_many/with_sum_limit_offset_test.go | 22 +- .../query/one_to_many/with_sum_limit_test.go | 22 +- .../query/one_to_many/with_typename_test.go | 6 +- .../query/one_to_many_multiple/utils.go | 43 - .../with_average_filter_test.go | 370 +++--- .../one_to_many_multiple/with_average_test.go | 181 +-- .../with_count_filter_test.go | 362 +++--- .../one_to_many_multiple/with_count_test.go | 350 ++++-- .../with_multiple_filter_test.go | 171 ++- .../with_sum_filter_test.go | 370 +++--- .../one_to_many_multiple/with_sum_test.go | 181 +-- .../query/one_to_many_to_many/joins_test.go | 365 +++--- .../query/one_to_many_to_many/utils.go | 58 - .../query/one_to_many_to_one/fixture.go | 132 +- .../query/one_to_many_to_one/joins_test.go | 178 +-- .../query/one_to_many_to_one/simple_test.go | 57 +- .../one_to_many_to_one/with_filter_test.go | 117 +- .../with_order_limit_test.go | 10 +- .../with_sum_order_limit_test.go | 40 +- .../query/one_to_many_to_one/with_sum_test.go | 59 +- .../query/one_to_one/simple_test.go | 248 ++-- .../one_to_one/with_clashing_id_field_test.go | 11 +- .../one_to_one/with_count_filter_test.go | 53 +- .../one_to_one/with_filter_order_test.go | 16 +- .../query/one_to_one/with_filter_test.go | 223 ++-- .../with_group_related_id_alias_test.go | 136 +-- .../one_to_one/with_group_related_id_test.go | 136 +-- .../query/one_to_one/with_order_test.go | 36 +- .../query/one_to_one_multiple/simple_test.go | 84 +- .../query/one_to_one_to_many/simple_test.go | 72 +- .../query/one_to_one_to_one/simple_test.go | 192 ++- .../one_to_one_to_one/with_order_test.go | 36 +- .../query/one_to_two_many/simple_test.go | 842 +++++++------ .../query/one_to_two_many/utils.go | 45 - .../query/one_to_two_many/with_order_test.go | 188 +-- tests/integration/query/simple/simple_test.go | 2 +- .../query/simple/with_cid_doc_id_test.go | 36 +- .../query/simple/with_deleted_field_test.go | 4 +- 
.../query/simple/with_doc_id_filter_test.go | 2 +- .../query/simple/with_doc_id_test.go | 4 +- .../query/simple/with_doc_ids_test.go | 6 +- .../query/simple/with_filter/with_and_test.go | 8 +- .../simple/with_filter/with_ge_float_test.go | 4 +- .../simple/with_filter/with_gt_float_test.go | 4 +- .../simple/with_filter/with_gt_int_test.go | 8 +- .../query/simple/with_filter/with_in_test.go | 20 +- .../with_filter/with_like_string_test.go | 4 +- .../simple/with_filter/with_ne_bool_test.go | 8 +- .../with_filter/with_ne_datetime_test.go | 4 +- .../simple/with_filter/with_ne_int_test.go | 4 +- .../simple/with_filter/with_ne_string_test.go | 4 +- .../query/simple/with_filter/with_nin_test.go | 4 +- .../with_filter/with_nlike_string_test.go | 4 +- .../query/simple/with_filter/with_not_test.go | 32 +- .../query/simple/with_filter/with_or_test.go | 8 +- .../simple/with_group_average_filter_test.go | 66 +- .../simple/with_group_average_sum_test.go | 16 +- .../query/simple/with_group_average_test.go | 92 +- .../with_group_count_limit_offset_test.go | 2 +- .../simple/with_group_count_limit_test.go | 2 +- .../query/simple/with_group_count_sum_test.go | 8 +- .../query/simple/with_group_doc_id_test.go | 12 +- .../query/simple/with_group_doc_ids_test.go | 18 +- .../query/simple/with_group_filter_test.go | 52 +- .../query/simple/with_group_order_test.go | 32 +- .../query/simple/with_group_sum_test.go | 92 +- .../query/simple/with_group_test.go | 92 +- .../query/simple/with_limit_offset_test.go | 20 +- .../query/simple/with_order_test.go | 8 +- .../query/simple/with_version_test.go | 62 +- .../migrations/query/with_doc_id_test.go | 28 +- .../schema/updates/add/field/create_test.go | 13 +- .../add/field/kind/foreign_object_test.go | 46 +- .../subscription/subscription_test.go | 33 +- .../view/one_to_many/simple_test.go | 18 +- .../view/one_to_many/with_alias_test.go | 18 +- .../view/one_to_many/with_count_test.go | 17 +- .../view/one_to_many/with_transform_test.go | 8 +- 
.../view/one_to_one/identical_schema_test.go | 9 +- .../view/one_to_one/with_transform_test.go | 8 +- tests/integration/view/simple/simple_test.go | 4 +- .../view/simple/with_transform_test.go | 12 +- 207 files changed, 6002 insertions(+), 5384 deletions(-) create mode 100644 docs/data_format_changes/i2688-doc-id-includes-schema.md delete mode 100644 tests/integration/query/one_to_many_multiple/utils.go delete mode 100644 tests/integration/query/one_to_many_to_many/utils.go delete mode 100644 tests/integration/query/one_to_two_many/utils.go diff --git a/client/document.go b/client/document.go index ccbdfd688c..ada47cc8f9 100644 --- a/client/document.go +++ b/client/document.go @@ -777,6 +777,11 @@ func (doc *Document) GenerateDocID() (DocID, error) { return DocID{}, err } + // The DocID must take into consideration the schema root, this ensures that + // otherwise identical documents created using different schema will have different + // document IDs - we do not want cross-schema docID collisions. + bytes = append(bytes, []byte(doc.collectionDefinition.Schema.Root)...) + cid, err := ccid.NewSHA256CidV1(bytes) if err != nil { return DocID{}, err diff --git a/docs/data_format_changes/i2688-doc-id-includes-schema.md b/docs/data_format_changes/i2688-doc-id-includes-schema.md new file mode 100644 index 0000000000..caca83656c --- /dev/null +++ b/docs/data_format_changes/i2688-doc-id-includes-schema.md @@ -0,0 +1,3 @@ +# Incorporate schema root into docID + +DocID generation now incorporates schema root, meaning documents created against different schema will no longer clash. This change also means that all the docIDs and commit CIDs have changed. 
diff --git a/docs/website/guides/schema-relationship.md b/docs/website/guides/schema-relationship.md index 59745b60f2..5eb77dc8d4 100644 --- a/docs/website/guides/schema-relationship.md +++ b/docs/website/guides/schema-relationship.md @@ -81,7 +81,7 @@ mutation { ```graphql mutation { - create_User(input: {name: "Alice", username: "awesomealice", age: 35, address_id: "bae-fd541c25-229e-5280-b44b-e5c2af3e374d"}) { + create_User(input: {name: "Alice", username: "awesomealice", age: 35, address_id: "bae-be6d8024-4953-5a92-84b4-f042d25230c6"}) { _key } } diff --git a/docs/website/guides/time-traveling-queries.md b/docs/website/guides/time-traveling-queries.md index 0c952861bd..2bc36956f1 100644 --- a/docs/website/guides/time-traveling-queries.md +++ b/docs/website/guides/time-traveling-queries.md @@ -21,7 +21,7 @@ A powerful feature of a time-traveling query is that very little work is require query { User ( cid: "bafybeieqnthjlvr64aodivtvtwgqelpjjvkmceyz4aqerkk5h23kjoivmu", - dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + dockey: "bae-d4303725-7db9-53d2-b324-f3ee44020e52" ) { name age diff --git a/docs/website/references/query-specification/database-api.md b/docs/website/references/query-specification/database-api.md index e2cec7d671..8a40583f89 100644 --- a/docs/website/references/query-specification/database-api.md +++ b/docs/website/references/query-specification/database-api.md @@ -106,7 +106,7 @@ Commits queries also work with aggregates, grouping, limit, offset, order, docke There is __typename introspection keyword that works on all queries that does not appear to be documented anywhere, for example: ```graphql -commits(dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f") { +commits(dockey: "bae-d4303725-7db9-53d2-b324-f3ee44020e52") { cid __typename } diff --git a/internal/db/backup_test.go b/internal/db/backup_test.go index 486080db81..0d84a394d1 100644 --- a/internal/db/backup_test.go +++ b/internal/db/backup_test.go @@ -80,7 +80,7 @@ func 
TestBasicExport_WithNormalFormatting_NoError(t *testing.T) { require.NoError(t, err) expectedMap := map[string]any{} - data := []byte(`{"Address":[{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_docID":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_docIDNew":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + data := []byte(`{"Address":[{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}],"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"},{"_docID":"bae-f8a0f1e4-129e-50ab-98ed-1aa110810fb2","_docIDNew":"bae-f8a0f1e4-129e-50ab-98ed-1aa110810fb2","age":40,"name":"Bob"}]}`) err = json.Unmarshal(data, &expectedMap) require.NoError(t, err) require.EqualValues(t, expectedMap, fileMap) @@ -145,7 +145,7 @@ func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) { require.NoError(t, err) expectedMap := map[string]any{} - data := []byte(`{"Address":[{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_docID":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_docIDNew":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + data := []byte(`{"Address":[{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple 
St"}],"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"},{"_docID":"bae-f8a0f1e4-129e-50ab-98ed-1aa110810fb2","_docIDNew":"bae-f8a0f1e4-129e-50ab-98ed-1aa110810fb2","age":40,"name":"Bob"}]}`) err = json.Unmarshal(data, &expectedMap) require.NoError(t, err) require.EqualValues(t, expectedMap, fileMap) @@ -210,7 +210,7 @@ func TestBasicExport_WithSingleCollection_NoError(t *testing.T) { require.NoError(t, err) expectedMap := map[string]any{} - data := []byte(`{"Address":[{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}]}`) + data := []byte(`{"Address":[{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}]}`) err = json.Unmarshal(data, &expectedMap) require.NoError(t, err) require.EqualValues(t, expectedMap, fileMap) @@ -252,10 +252,10 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) { col2, err := db.GetCollectionByName(ctx, "Book") require.NoError(t, err) - doc3, err := client.NewDocFromJSON([]byte(`{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`), col2.Definition()) + doc3, err := client.NewDocFromJSON([]byte(`{"name": "John and the sourcerers' stone", "author": "bae-7fca96a2-5f01-5558-a81f-09b47587f26d"}`), col2.Definition()) require.NoError(t, err) - doc4, err := client.NewDocFromJSON([]byte(`{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`), col2.Definition()) + doc4, err := client.NewDocFromJSON([]byte(`{"name": "Game of chains", "author": "bae-7fca96a2-5f01-5558-a81f-09b47587f26d"}`), col2.Definition()) require.NoError(t, err) err = col2.Create(ctx, doc3) @@ -287,7 +287,7 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) { 
require.NoError(t, err) expectedMap := map[string]any{} - data := []byte(`{"Book":[{"_docID":"bae-4399f189-138d-5d49-9e25-82e78463677b","_docIDNew":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"},{"_docID":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_docIDNew":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927","_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`) + data := []byte(`{"Book":[{"_docID":"bae-45c92e9c-4d31-5e96-8bd7-3d532734e117", "_docIDNew":"bae-3ca9a4c3-6240-5e86-a00f-9590d2f2ecf3", "author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "name":"John and the sourcerers' stone"}, {"_docID":"bae-8fc3d148-869b-5629-ae22-5423c73f709b", "_docIDNew":"bae-33c136bd-4375-54a0-81ff-54ca560c7bb8", "author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "name":"Game of chains"}], "User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d", "_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "age":31, "name":"John"}, {"_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", "_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", "age":31, "name":"Bob"}]}`) err = json.Unmarshal(data, &expectedMap) require.NoError(t, err) require.EqualValues(t, expectedMap, fileMap) @@ -345,7 +345,7 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) { err = os.WriteFile( filepath, - []byte(`{"Address":[{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple 
St"}],"User":[{"_docID":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_docIDNew":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + []byte(`{"Address":[{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}],"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":40,"name":"Bob"},{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`), 0664, ) require.NoError(t, err) @@ -360,7 +360,7 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) { require.NoError(t, err) expectedMap := map[string]any{} - data := []byte(`{"Address":[{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}]}`) + data := []byte(`{"Address":[{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}]}`) err = json.Unmarshal(data, &expectedMap) require.NoError(t, err) require.EqualValues(t, expectedMap, fileMap) @@ -393,7 +393,7 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) { err = os.WriteFile( filepath, - []byte(`{"Address":[{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_docID":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_docIDNew":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 
[]byte(`{"Address":[{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}],"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":40,"name":"Bob"},{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`), 0664, ) require.NoError(t, err) @@ -412,7 +412,7 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) { col1, err := db.getCollectionByName(ctx, "Address") require.NoError(t, err) - key1, err := client.NewDocIDFromString("bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f") + key1, err := client.NewDocIDFromString("bae-41e1a410-df86-5846-939e-4470a8d8cb0c") require.NoError(t, err) _, err = col1.Get(ctx, key1, false) require.NoError(t, err) @@ -420,12 +420,12 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) { col2, err := db.getCollectionByName(ctx, "User") require.NoError(t, err) - key2, err := client.NewDocIDFromString("bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df") + key2, err := client.NewDocIDFromString("bae-7fca96a2-5f01-5558-a81f-09b47587f26d") require.NoError(t, err) _, err = col2.Get(ctx, key2, false) require.NoError(t, err) - key3, err := client.NewDocIDFromString("bae-e933420a-988a-56f8-8952-6c245aebd519") + key3, err := client.NewDocIDFromString("bae-7fca96a2-5f01-5558-a81f-09b47587f26d") require.NoError(t, err) _, err = col2.Get(ctx, key3, false) require.NoError(t, err) @@ -456,7 +456,7 @@ func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) { err = os.WriteFile( filepath, - []byte(`["Address":[{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple 
St"}],"User":[{"_docID":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_docIDNew":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]]`), + []byte(`["Address":[{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}],"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":40,"name":"Bob"},{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]]`), 0664, ) require.NoError(t, err) @@ -492,7 +492,7 @@ func TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) { err = os.WriteFile( filepath, - []byte(`{"Address":{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + []byte(`{"Address":{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}}`), 0664, ) require.NoError(t, err) @@ -528,7 +528,7 @@ func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) { err = os.WriteFile( filepath, - []byte(`{"Address":{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + []byte(`{"Address":{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}}`), 0664, ) require.NoError(t, err) @@ -565,7 +565,7 @@ func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) { err = os.WriteFile( filepath, - 
[]byte(`{"Addresses":{"_docID":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_docIDNew":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + []byte(`{"Addresses":{"_docID":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","_docIDNew":"bae-41e1a410-df86-5846-939e-4470a8d8cb0c","city":"Toronto","street":"101 Maple St"}}`), 0664, ) require.NoError(t, err) diff --git a/tests/integration/acp/index/query_with_relation_test.go b/tests/integration/acp/index/query_with_relation_test.go index 8a0d8c7d74..19229b3f67 100644 --- a/tests/integration/acp/index/query_with_relation_test.go +++ b/tests/integration/acp/index/query_with_relation_test.go @@ -47,7 +47,7 @@ func createAuthorBooksSchemaWithPolicyAndCreateDocs() []any { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84 Doc: `{ "name": "John Grisham", "age": 65, @@ -66,29 +66,29 @@ func createAuthorBooksSchemaWithPolicyAndCreateDocs() []any { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ Identity: acpUtils.Actor1Identity, CollectionID: 1, - Doc: `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ Identity: acpUtils.Actor1Identity, CollectionID: 1, - Doc: `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(0, 1), + }, }, } } diff --git a/tests/integration/acp/query/fixture.go 
b/tests/integration/acp/query/fixture.go index 2b05b48232..be521e700f 100644 --- a/tests/integration/acp/query/fixture.go +++ b/tests/integration/acp/query/fixture.go @@ -104,45 +104,37 @@ func getSetupEmployeeCompanyActions() []any { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: ` - { - "name": "PubEmp in PubCompany", - "salary": 10000, - "company": "bae-1ab7ac86-3c68-5abb-b526-803858c9dccf" - } - `, + DocMap: map[string]any{ + "name": "PubEmp in PubCompany", + "salary": 10000, + "company": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 0, - Doc: ` - { - "name": "PubEmp in PrivateCompany", - "salary": 20000, - "company": "bae-4aef4bd6-e2ee-5075-85a5-4d64bbf80bca" - } - `, + DocMap: map[string]any{ + "name": "PubEmp in PrivateCompany", + "salary": 20000, + "company": testUtils.NewDocIndex(1, 1), + }, }, testUtils.CreateDoc{ CollectionID: 0, Identity: acpUtils.Actor1Identity, - Doc: ` - { - "name": "PrivateEmp in PubCompany", - "salary": 30000, - "company": "bae-1ab7ac86-3c68-5abb-b526-803858c9dccf" - } - `, + DocMap: map[string]any{ + "name": "PrivateEmp in PubCompany", + "salary": 30000, + "company": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 0, Identity: acpUtils.Actor1Identity, - Doc: ` - { - "name": "PrivateEmp in PrivateCompany", - "salary": 40000, - "company": "bae-4aef4bd6-e2ee-5075-85a5-4d64bbf80bca" - } - `, + DocMap: map[string]any{ + "name": "PrivateEmp in PrivateCompany", + "salary": 40000, + "company": testUtils.NewDocIndex(1, 1), + }, }, } } diff --git a/tests/integration/acp/query/relation_objects_test.go b/tests/integration/acp/query/relation_objects_test.go index 76bd264ac8..6a1b79f8b1 100644 --- a/tests/integration/acp/query/relation_objects_test.go +++ b/tests/integration/acp/query/relation_objects_test.go @@ -36,14 +36,14 @@ func TestACP_QueryManyToOneRelationObjectsWithoutIdentity(t *testing.T) { } `, Results: []map[string]any{ - { - "name": "PubEmp in PrivateCompany", - "company": nil, - 
}, { "name": "PubEmp in PubCompany", "company": map[string]any{"name": "Public Company"}, }, + { + "name": "PubEmp in PrivateCompany", + "company": nil, + }, }, }, }, @@ -113,14 +113,14 @@ func TestACP_QueryManyToOneRelationObjectsWithIdentity(t *testing.T) { "name": "PrivateEmp in PrivateCompany", "company": map[string]any{"name": "Private Company"}, }, - { - "name": "PubEmp in PrivateCompany", - "company": map[string]any{"name": "Private Company"}, - }, { "name": "PubEmp in PubCompany", "company": map[string]any{"name": "Public Company"}, }, + { + "name": "PubEmp in PrivateCompany", + "company": map[string]any{"name": "Private Company"}, + }, }, }, }, @@ -191,14 +191,14 @@ func TestACP_QueryManyToOneRelationObjectsWithWrongIdentity(t *testing.T) { } `, Results: []map[string]any{ - { - "name": "PubEmp in PrivateCompany", - "company": nil, - }, { "name": "PubEmp in PubCompany", "company": map[string]any{"name": "Public Company"}, }, + { + "name": "PubEmp in PrivateCompany", + "company": nil, + }, }, }, }, diff --git a/tests/integration/acp/register_and_delete_test.go b/tests/integration/acp/register_and_delete_test.go index ef4e80ec22..1392c3719a 100644 --- a/tests/integration/acp/register_and_delete_test.go +++ b/tests/integration/acp/register_and_delete_test.go @@ -393,7 +393,7 @@ func TestACP_CreateWithIdentityAndDeleteWithoutIdentity_CanNotDelete(t *testing. 
`, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, @@ -501,7 +501,7 @@ func TestACP_CreateWithIdentityAndDeleteWithWrongIdentity_CanNotDelete(t *testin `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, diff --git a/tests/integration/acp/register_and_read_test.go b/tests/integration/acp/register_and_read_test.go index 0ad80c4953..280d85637e 100644 --- a/tests/integration/acp/register_and_read_test.go +++ b/tests/integration/acp/register_and_read_test.go @@ -92,7 +92,7 @@ func TestACP_CreateWithoutIdentityAndReadWithoutIdentity_CanRead(t *testing.T) { `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, @@ -182,7 +182,7 @@ func TestACP_CreateWithoutIdentityAndReadWithIdentity_CanRead(t *testing.T) { `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, @@ -274,7 +274,7 @@ func TestACP_CreateWithIdentityAndReadWithIdentity_CanRead(t *testing.T) { `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, diff --git a/tests/integration/acp/register_and_update_test.go b/tests/integration/acp/register_and_update_test.go index 4a51aa6aec..e9ebb52e27 100644 --- a/tests/integration/acp/register_and_update_test.go +++ b/tests/integration/acp/register_and_update_test.go @@ -111,7 +111,7 @@ func TestACP_CreateWithoutIdentityAndUpdateWithoutIdentity_CanUpdate(t *testing. 
Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad Lone", "age": int64(28), }, @@ -217,7 +217,7 @@ func TestACP_CreateWithoutIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad Lone", "age": int64(28), }, @@ -327,7 +327,7 @@ func TestACP_CreateWithIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) { `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad Lone", "age": int64(28), }, @@ -443,7 +443,7 @@ func TestACP_CreateWithIdentityAndUpdateWithoutIdentity_CanNotUpdate(t *testing. `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, @@ -563,7 +563,7 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentity_CanNotUpdate(t *testin `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, @@ -678,7 +678,7 @@ func TestACP_CreateWithIdentityAndUpdateWithoutIdentityGQL_CanNotUpdate(t *testi `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, @@ -797,7 +797,7 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentityGQL_CanNotUpdate(t *tes `, Results: []map[string]any{ { - "_docID": "bae-1e608f7d-b01e-5dd5-ad4a-9c6cc3005a36", + "_docID": "bae-9d443d0c-52f6-568b-8f74-e8ff0825697b", "name": "Shahzad", "age": int64(28), }, diff --git a/tests/integration/backup/one_to_many/export_test.go 
b/tests/integration/backup/one_to_many/export_test.go index 3626535b9d..9126456149 100644 --- a/tests/integration/backup/one_to_many/export_test.go +++ b/tests/integration/backup/one_to_many/export_test.go @@ -28,7 +28,7 @@ func TestBackupExport_JustUserCollection_NoError(t *testing.T) { Config: client.BackupConfig{ Collections: []string{"User"}, }, - ExpectedContent: `{"User":[{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`, }, }, } @@ -49,7 +49,10 @@ func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing. }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + DocMap: map[string]any{ + "name": "John and the sourcerers' stone", + "author": testUtils.NewDocIndex(0, 0), + }, }, testUtils.UpdateDoc{ CollectionID: 0, @@ -57,7 +60,7 @@ func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing. 
Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"User":[{"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927","_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_docID":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_docIDNew":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","age":31,"name":"John"},{"_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","age":31,"name":"Bob"}],"Book":[{"_docID":"bae-45c92e9c-4d31-5e96-8bd7-3d532734e117","_docIDNew":"bae-3ca9a4c3-6240-5e86-a00f-9590d2f2ecf3","author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","name":"John and the sourcerers' stone"}]}`, }, }, } @@ -78,11 +81,17 @@ func TestBackupExport_AllCollectionsMultipleDocsAndMultipleDocUpdate_NoError(t * }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + DocMap: map[string]any{ + "name": "John and the sourcerers' stone", + "author": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + DocMap: map[string]any{ + "name": "Game of chains", + "author": testUtils.NewDocIndex(0, 0), + }, }, testUtils.UpdateDoc{ CollectionID: 0, @@ -90,7 +99,7 @@ func TestBackupExport_AllCollectionsMultipleDocsAndMultipleDocUpdate_NoError(t * Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: 
`{"User":[{"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927","_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_docID":"bae-4399f189-138d-5d49-9e25-82e78463677b","_docIDNew":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"},{"_docID":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_docIDNew":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","age":31,"name":"John"},{"_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","age":31,"name":"Bob"}],"Book":[{"_docID":"bae-45c92e9c-4d31-5e96-8bd7-3d532734e117","_docIDNew":"bae-3ca9a4c3-6240-5e86-a00f-9590d2f2ecf3","author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","name":"John and the sourcerers' stone"},{"_docID":"bae-8fc3d148-869b-5629-ae22-5423c73f709b","_docIDNew":"bae-33c136bd-4375-54a0-81ff-54ca560c7bb8","author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","name":"Game of chains"}]}`, }, }, } diff --git a/tests/integration/backup/one_to_many/import_test.go b/tests/integration/backup/one_to_many/import_test.go index 193867cc8d..70260d58d8 100644 --- a/tests/integration/backup/one_to_many/import_test.go +++ b/tests/integration/backup/one_to_many/import_test.go @@ -42,16 +42,16 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing }`, Results: []map[string]any{ { - "name": "Smith", - "age": int64(31), + "name": "John", + "age": int64(30), }, { "name": "Bob", "age": int64(32), }, { - "name": "John", - "age": int64(30), + "name": "Smith", + "age": int64(31), }, }, }, @@ 
-64,10 +64,10 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing }`, Results: []map[string]any{ { - "name": "John and the sourcerers' stone", + "name": "Game of chains", }, { - "name": "Game of chains", + "name": "John and the sourcerers' stone", }, }, }, @@ -84,28 +84,28 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr ImportContent: `{ "Book":[ { - "_docID":"bae-4399f189-138d-5d49-9e25-82e78463677b", - "_docIDNew":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff", - "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID":"bae-8fc3d148-869b-5629-ae22-5423c73f709b", + "_docIDNew":"bae-33c136bd-4375-54a0-81ff-54ca560c7bb8", + "author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "name":"Game of chains" }, { - "_docID":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da", - "_docIDNew":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5", - "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID":"bae-45c92e9c-4d31-5e96-8bd7-3d532734e117", + "_docIDNew":"bae-3ca9a4c3-6240-5e86-a00f-9590d2f2ecf3", + "author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "name":"John and the sourcerers' stone" } ], "User":[ { - "_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927", - "_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", + "_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", "age":31, "name":"Bob" }, { - "_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519", - "_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d", + "_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "age":31, "name":"John" } @@ -122,11 +122,11 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr }`, Results: []map[string]any{ { - "name": "Bob", + "name": "John", "age": int64(31), }, { - "name": "John", + "name": "Bob", "age": int64(31), }, }, @@ -145,13 +145,13 @@ func 
TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr { "name": "Game of chains", "author": map[string]any{ - "_docID": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID": "bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", }, }, { "name": "John and the sourcerers' stone", "author": map[string]any{ - "_docID": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID": "bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", }, }, }, diff --git a/tests/integration/backup/one_to_one/export_test.go b/tests/integration/backup/one_to_one/export_test.go index b52e0bb02f..fb63d52208 100644 --- a/tests/integration/backup/one_to_one/export_test.go +++ b/tests/integration/backup/one_to_one/export_test.go @@ -28,7 +28,7 @@ func TestBackupExport_JustUserCollection_NoError(t *testing.T) { Config: client.BackupConfig{ Collections: []string{"User"}, }, - ExpectedContent: `{"User":[{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`, }, }, } @@ -49,7 +49,10 @@ func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing. }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + DocMap: map[string]any{ + "name": "John and the sourcerers' stone", + "author": testUtils.NewDocIndex(0, 0), + }, }, testUtils.UpdateDoc{ CollectionID: 0, @@ -57,7 +60,7 @@ func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing. 
Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"User":[{"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927","_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_docID":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_docIDNew":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","age":31,"name":"John"},{"_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","age":31,"name":"Bob"}],"Book":[{"_docID":"bae-f33a7110-fb6f-57aa-9501-df0111427315","_docIDNew":"bae-c9c1a385-afce-5ef7-8b98-9369b157fd97","author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","name":"John and the sourcerers' stone"}]}`, }, }, } @@ -93,7 +96,11 @@ func TestBackupExport_DoubleReletionship_NoError(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519", "favourite": "bae-0648f44e-74e8-593b-a662-3310ec278927"}`, + DocMap: map[string]any{ + "name": "John and the sourcerers' stone", + "author": testUtils.NewDocIndex(0, 0), + "favourite": testUtils.NewDocIndex(0, 1), + }, }, testUtils.UpdateDoc{ CollectionID: 0, @@ -101,7 +108,7 @@ func TestBackupExport_DoubleReletionship_NoError(t *testing.T) { Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: 
`{"User":[{"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927","_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_docID":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_docIDNew":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","age":31,"name":"John"},{"_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","age":31,"name":"Bob"}],"Book":[{"_docID":"bae-ccf9da82-8ed6-5133-b64f-558c21bc8dfd","_docIDNew":"bae-27ae099a-fa7d-5a66-a919-6c3b0322d17c","author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","favourite_id":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","name":"John and the sourcerers' stone"}]}`, }, }, } @@ -137,7 +144,11 @@ func TestBackupExport_DoubleReletionshipWithUpdate_NoError(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519", "favourite": "bae-0648f44e-74e8-593b-a662-3310ec278927"}`, + DocMap: map[string]any{ + "name": "John and the sourcerers' stone", + "author": testUtils.NewDocIndex(0, 0), + "favourite": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 1, @@ -149,7 +160,7 @@ func TestBackupExport_DoubleReletionshipWithUpdate_NoError(t *testing.T) { Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: 
`{"User":[{"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927","_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_docID":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_docIDNew":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"},{"_docID":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","_docIDNew":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","name":"Game of chains"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","age":31,"name":"John"},{"_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","age":31,"name":"Bob"}],"Book":[{"_docID":"bae-ccf9da82-8ed6-5133-b64f-558c21bc8dfd","_docIDNew":"bae-27ae099a-fa7d-5a66-a919-6c3b0322d17c","author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","favourite_id":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","name":"John and the sourcerers' stone"},{"_docID":"bae-ffba7007-d4d4-5630-be53-d66f56da57fd","_docIDNew":"bae-ffba7007-d4d4-5630-be53-d66f56da57fd","name":"Game of chains"}]}`, }, }, } diff --git a/tests/integration/backup/one_to_one/import_test.go b/tests/integration/backup/one_to_one/import_test.go index 8c3aff4fe2..4451841163 100644 --- a/tests/integration/backup/one_to_one/import_test.go +++ b/tests/integration/backup/one_to_one/import_test.go @@ -42,16 +42,16 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing }`, Results: []map[string]any{ { - "name": "Smith", - "age": int64(31), + "name": "John", + "age": int64(30), }, { "name": "Bob", "age": int64(32), }, { - "name": "John", - "age": int64(30), + "name": "Smith", + "age": int64(31), }, }, }, @@ 
-64,10 +64,10 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing }`, Results: []map[string]any{ { - "name": "John and the sourcerers' stone", + "name": "Game of chains", }, { - "name": "Game of chains", + "name": "John and the sourcerers' stone", }, }, }, @@ -84,22 +84,22 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr ImportContent: `{ "Book":[ { - "_docID":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da", - "_docIDNew":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5", - "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID":"bae-f33a7110-fb6f-57aa-9501-df0111427315", + "_docIDNew":"bae-c9c1a385-afce-5ef7-8b98-9369b157fd97", + "author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "name":"John and the sourcerers' stone" } ], "User":[ { - "_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927", - "_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", + "_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", "age":31, "name":"Bob" }, { - "_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519", - "_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d", + "_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "age":31, "name":"John" } @@ -116,11 +116,11 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr }`, Results: []map[string]any{ { - "name": "Bob", + "name": "John", "age": int64(31), }, { - "name": "John", + "name": "Bob", "age": int64(31), }, }, @@ -139,7 +139,7 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr { "name": "John and the sourcerers' stone", "author": map[string]any{ - "_docID": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID": "bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", }, }, }, @@ -159,26 +159,26 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndMultipleUpdatedD { 
"_docID":"bae-4399f189-138d-5d49-9e25-82e78463677b", "_docIDNew":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff", - "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "name":"Game of chains" }, { - "_docID":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da", - "_docIDNew":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5", - "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID":"bae-f33a7110-fb6f-57aa-9501-df0111427315", + "_docIDNew":"bae-c9c1a385-afce-5ef7-8b98-9369b157fd97", + "author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "name":"John and the sourcerers' stone" } ], "User":[ { - "_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927", - "_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", + "_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f", "age":31, "name":"Bob" }, { - "_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519", - "_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d", + "_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f", "age":31, "name":"John" } @@ -211,7 +211,7 @@ func TestBackupImport_DoubleRelationshipWithUpdate_NoError(t *testing.T) { `, }, testUtils.BackupImport{ - ImportContent: `{"Book":[{"_docID":"bae-236c14bd-4621-5d43-bc03-4442f3b8719e","_docIDNew":"bae-6dbb3738-d3db-5121-acee-6fbdd97ff7a8","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"},{"_docID":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","_docIDNew":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","name":"Game of chains"}],"User":[{"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927","_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + ImportContent: 
`{"Book":[{"_docID":"bae-236c14bd-4621-5d43-bc03-4442f3b8719e","_docIDNew":"bae-6dbb3738-d3db-5121-acee-6fbdd97ff7a8","author_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","favourite_id":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","name":"John and the sourcerers' stone"},{"_docID":"bae-ffba7007-d4d4-5630-be53-d66f56da57fd","_docIDNew":"bae-ffba7007-d4d4-5630-be53-d66f56da57fd","name":"Game of chains"}],"User":[{"_docID":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","_docIDNew":"bae-ebfe11e2-045d-525d-9fb7-2abb961dc84f","age":31,"name":"Bob"},{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-9918e1ec-c62b-5de2-8fbf-c82795b8ac7f","age":31,"name":"John"}]}`, }, testUtils.Request{ Request: ` diff --git a/tests/integration/backup/self_reference/export_test.go b/tests/integration/backup/self_reference/export_test.go index 9a0c73a8d1..b31216a1ad 100644 --- a/tests/integration/backup/self_reference/export_test.go +++ b/tests/integration/backup/self_reference/export_test.go @@ -26,13 +26,13 @@ func TestBackupExport_Simple_NoError(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{"name": "Bob", "age": 31, "boss": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + Doc: `{"name": "Bob", "age": 31, "boss": "bae-a2162ff0-3257-50f1-ba2f-39c299921220"}`, }, testUtils.BackupExport{ Config: client.BackupConfig{ Collections: []string{"User"}, }, - ExpectedContent: `{"User":[{"_docID":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","_docIDNew":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","age":31,"boss_id":"bae-e933420a-988a-56f8-8952-6c245aebd519","name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ExpectedContent: 
`{"User":[{"_docID":"bae-a2162ff0-3257-50f1-ba2f-39c299921220","_docIDNew":"bae-a2162ff0-3257-50f1-ba2f-39c299921220","age":30,"name":"John"},{"_docID":"bae-f4def2b3-2fe8-5e3b-838e-b9d9f8aca102","_docIDNew":"bae-f4def2b3-2fe8-5e3b-838e-b9d9f8aca102","age":31,"boss_id":"bae-a2162ff0-3257-50f1-ba2f-39c299921220","name":"Bob"}]}`, }, }, } @@ -49,7 +49,7 @@ func TestBackupExport_MultipleDocsAndDocUpdate_NoError(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: `{"name": "Bob", "age": 31, "boss": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + Doc: `{"name": "Bob", "age": 31, "boss": "bae-a2162ff0-3257-50f1-ba2f-39c299921220"}`, }, testUtils.UpdateDoc{ CollectionID: 0, @@ -57,7 +57,7 @@ func TestBackupExport_MultipleDocsAndDocUpdate_NoError(t *testing.T) { Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"User":[{"_docID":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","_docIDNew":"bae-067fd15e-32a1-5681-8f41-c423f563e21b","age":31,"boss_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Bob"},{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-a2162ff0-3257-50f1-ba2f-39c299921220","_docIDNew":"bae-99fbc678-167f-5325-bdf1-79fa76039125","age":31,"name":"John"},{"_docID":"bae-f4def2b3-2fe8-5e3b-838e-b9d9f8aca102","_docIDNew":"bae-98531af8-dda5-5993-b140-1495fa8f1576","age":31,"boss_id":"bae-99fbc678-167f-5325-bdf1-79fa76039125","name":"Bob"}]}`, }, }, } diff --git a/tests/integration/backup/self_reference/import_test.go b/tests/integration/backup/self_reference/import_test.go index 0a68a66d85..5ddb800a41 100644 --- a/tests/integration/backup/self_reference/import_test.go +++ b/tests/integration/backup/self_reference/import_test.go @@ -25,13 +25,13 @@ func TestBackupSelfRefImport_Simple_NoError(t *testing.T) { ImportContent: `{ "User":[ { - "_docID":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d", + 
"_docID":"bae-f4def2b3-2fe8-5e3b-838e-b9d9f8aca102", "age":31, - "boss_id":"bae-e933420a-988a-56f8-8952-6c245aebd519", + "boss_id":"bae-a2162ff0-3257-50f1-ba2f-39c299921220", "name":"Bob" }, { - "_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519", + "_docID":"bae-a2162ff0-3257-50f1-ba2f-39c299921220", "age":30, "name":"John" } @@ -49,16 +49,16 @@ func TestBackupSelfRefImport_Simple_NoError(t *testing.T) { } }`, Results: []map[string]any{ + { + "name": "John", + "boss": nil, + }, { "name": "Bob", "boss": map[string]any{ "name": "John", }, }, - { - "name": "John", - "boss": nil, - }, }, }, }, @@ -71,10 +71,10 @@ func TestBackupSelfRefImport_SelfRef_NoError(t *testing.T) { expectedExportData := `{` + `"User":[` + `{` + - `"_docID":"bae-0648f44e-74e8-593b-a662-3310ec278927",` + - `"_docIDNew":"bae-0648f44e-74e8-593b-a662-3310ec278927",` + + `"_docID":"bae-20631b3d-1498-51f1-be29-5c0effbfa646",` + + `"_docIDNew":"bae-20631b3d-1498-51f1-be29-5c0effbfa646",` + `"age":31,` + - `"boss_id":"bae-0648f44e-74e8-593b-a662-3310ec278927",` + + `"boss_id":"bae-20631b3d-1498-51f1-be29-5c0effbfa646",` + `"name":"Bob"` + `}` + `]` + @@ -98,7 +98,7 @@ func TestBackupSelfRefImport_SelfRef_NoError(t *testing.T) { testUtils.UpdateDoc{ NodeID: immutable.Some(0), Doc: `{ - "boss_id": "bae-0648f44e-74e8-593b-a662-3310ec278927" + "boss_id": "bae-20631b3d-1498-51f1-be29-5c0effbfa646" }`, }, testUtils.BackupExport{ @@ -162,8 +162,8 @@ func TestBackupSelfRefImport_PrimaryRelationWithSecondCollection_NoError(t *test "Book":[ { "name":"John and the sourcerers' stone", - "author":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", - "reviewedBy":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + "author":"bae-da91935a-9176-57ea-ba68-afe05781da16", + "reviewedBy":"bae-da91935a-9176-57ea-ba68-afe05781da16" } ] }`, @@ -221,8 +221,8 @@ func TestBackupSelfRefImport_PrimaryRelationWithSecondCollectionWrongOrder_NoErr "Book":[ { "name":"John and the sourcerers' stone", - 
"author":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", - "reviewedBy":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + "author":"bae-da91935a-9176-57ea-ba68-afe05781da16", + "reviewedBy":"bae-da91935a-9176-57ea-ba68-afe05781da16" } ], "Author":[ @@ -269,18 +269,18 @@ func TestBackupSelfRefImport_SplitPrimaryRelationWithSecondCollection_NoError(t expectedExportData := `{` + `"Author":[` + `{` + - `"_docID":"bae-d760e445-22ef-5956-9947-26de226891f6",` + - `"_docIDNew":"bae-e3a6ff01-33ff-55f4-88f9-d13db26274c8",` + - `"book_id":"bae-c821a0a9-7afc-583b-accb-dc99a09c1ff8",` + + `"_docID":"bae-069af8c0-9728-5dde-84ff-ab2dd836f165",` + + `"_docIDNew":"bae-f2e84aeb-decc-5e40-94ff-e365f0ed0f4b",` + + `"book_id":"bae-006376a9-5ceb-5bd0-bfed-6ff5afd3eb93",` + `"name":"John"` + `}` + `],` + `"Book":[` + `{` + - `"_docID":"bae-4059cb15-2b30-5049-b0df-64cc7ad9b5e4",` + + `"_docID":"bae-2b931633-22bf-576f-b788-d8098b213e5a",` + `"_docIDNew":"bae-c821a0a9-7afc-583b-accb-dc99a09c1ff8",` + `"name":"John and the sourcerers' stone",` + - `"reviewedBy_id":"bae-e3a6ff01-33ff-55f4-88f9-d13db26274c8"` + + `"reviewedBy_id":"bae-069af8c0-9728-5dde-84ff-ab2dd836f165"` + `}` + `]` + `}` @@ -308,7 +308,7 @@ func TestBackupSelfRefImport_SplitPrimaryRelationWithSecondCollection_NoError(t testUtils.CreateDoc{ NodeID: immutable.Some(0), CollectionID: 1, - // bae-4059cb15-2b30-5049-b0df-64cc7ad9b5e4 + // bae-2b931633-22bf-576f-b788-d8098b213e5a Doc: `{ "name": "John and the sourcerers' stone" }`, @@ -318,7 +318,7 @@ func TestBackupSelfRefImport_SplitPrimaryRelationWithSecondCollection_NoError(t CollectionID: 0, Doc: `{ "name": "John", - "book": "bae-4059cb15-2b30-5049-b0df-64cc7ad9b5e4" + "book": "bae-2b931633-22bf-576f-b788-d8098b213e5a" }`, }, testUtils.UpdateDoc{ @@ -326,7 +326,7 @@ func TestBackupSelfRefImport_SplitPrimaryRelationWithSecondCollection_NoError(t CollectionID: 1, DocID: 0, Doc: `{ - "reviewedBy_id": "bae-d760e445-22ef-5956-9947-26de226891f6" + "reviewedBy_id": 
"bae-069af8c0-9728-5dde-84ff-ab2dd836f165" }`, }, /* diff --git a/tests/integration/backup/simple/export_test.go b/tests/integration/backup/simple/export_test.go index d7397b13cb..d4cbb98cbb 100644 --- a/tests/integration/backup/simple/export_test.go +++ b/tests/integration/backup/simple/export_test.go @@ -25,7 +25,7 @@ func TestBackupExport_Simple_NoError(t *testing.T) { Doc: `{"name": "John", "age": 30}`, }, testUtils.BackupExport{ - ExpectedContent: `{"User":[{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`, }, }, } @@ -41,7 +41,7 @@ func TestBackupExport_Empty_NoError(t *testing.T) { Doc: `{}`, }, testUtils.BackupExport{ - ExpectedContent: `{"User":[{"_docID":"bae-524bfa06-849c-5daf-b6df-05c2da80844d","_docIDNew":"bae-524bfa06-849c-5daf-b6df-05c2da80844d"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-0e6d7a02-4b15-5c2b-964f-208708db9ba5","_docIDNew":"bae-0e6d7a02-4b15-5c2b-964f-208708db9ba5"}]}`, }, }, } @@ -98,7 +98,7 @@ func TestBackupExport_JustUserCollection_NoError(t *testing.T) { Config: client.BackupConfig{ Collections: []string{"User"}, }, - ExpectedContent: `{"User":[{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`, }, }, } diff --git a/tests/integration/backup/simple/import_test.go b/tests/integration/backup/simple/import_test.go index cdfadc6e61..5d8d4a6ccc 100644 --- a/tests/integration/backup/simple/import_test.go +++ b/tests/integration/backup/simple/import_test.go @@ -20,7 +20,7 @@ func TestBackupImport_Simple_NoError(t *testing.T) { test := 
testUtils.TestCase{ Actions: []any{ testUtils.BackupImport{ - ImportContent: `{"User":[{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ImportContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`, }, testUtils.Request{ Request: ` @@ -60,7 +60,7 @@ func TestBackupImport_WithInvalidCollection_ReturnError(t *testing.T) { test := testUtils.TestCase{ Actions: []any{ testUtils.BackupImport{ - ImportContent: `{"Invalid":[{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ImportContent: `{"Invalid":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`, ExpectedError: "failed to get collection: datastore: key not found. Name: Invalid", }, }, @@ -77,7 +77,7 @@ func TestBackupImport_WithDocAlreadyExists_ReturnError(t *testing.T) { Doc: `{"name": "John", "age": 30}`, }, testUtils.BackupImport{ - ImportContent: `{"User":[{"_docID":"bae-e933420a-988a-56f8-8952-6c245aebd519","_docIDNew":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ImportContent: `{"User":[{"_docID":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","_docIDNew":"bae-7fca96a2-5f01-5558-a81f-09b47587f26d","age":30,"name":"John"}]}`, ExpectedError: "a document with the given ID already exists", }, }, @@ -133,16 +133,16 @@ func TestBackupImport_WithMultipleNoKeys_NoError(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Smith", - "age": int64(31), + "name": "John", + "age": int64(30), }, { "name": "Bob", "age": int64(32), }, { - "name": "John", - "age": int64(30), + "name": "Smith", + "age": int64(31), }, }, }, diff --git a/tests/integration/collection/update/simple/utils.go b/tests/integration/collection/update/simple/utils.go index 
c5e73f7e97..91cbe700f7 100644 --- a/tests/integration/collection/update/simple/utils.go +++ b/tests/integration/collection/update/simple/utils.go @@ -34,6 +34,9 @@ func init() { if err != nil { panic(err) } + u := c["Users"] + u.Schema.Root = "bafkreiclkqkxhq3xu3sz5fqcixykk2qfpva5asj3elcaqyxscax66ok4za" + c["Users"] = u colDefMap = c } diff --git a/tests/integration/events/simple/with_create_txn_test.go b/tests/integration/events/simple/with_create_txn_test.go index f77a6fe1f6..c60c47bd34 100644 --- a/tests/integration/events/simple/with_create_txn_test.go +++ b/tests/integration/events/simple/with_create_txn_test.go @@ -59,7 +59,7 @@ func TestEventsSimpleWithCreateWithTxnDiscarded(t *testing.T) { }, ExpectedUpdates: []testUtils.ExpectedUpdate{ { - DocID: immutable.Some("bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad"), + DocID: immutable.Some("bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc"), }, // No event should be received for Shahzad, as the transaction was discarded. }, diff --git a/tests/integration/explain/debug/dagscan_test.go b/tests/integration/explain/debug/dagscan_test.go index 010f866dd7..8da73b747f 100644 --- a/tests/integration/explain/debug/dagscan_test.go +++ b/tests/integration/explain/debug/dagscan_test.go @@ -38,7 +38,7 @@ func TestDebugExplainCommitsDagScanQueryOp(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain(type: debug) { - commits (docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + commits (docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84", fieldId: "1") { links { cid } @@ -64,7 +64,7 @@ func TestDebugExplainCommitsDagScanQueryOpWithoutField(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain(type: debug) { - commits (docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + commits (docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84") { links { cid } @@ -90,7 +90,7 @@ func TestDebugExplainLatestCommitsDagScanQueryOp(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain(type: debug) { - 
latestCommits(docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + latestCommits(docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84", fieldId: "1") { cid links { cid @@ -117,7 +117,7 @@ func TestDebugExplainLatestCommitsDagScanQueryOpWithoutField(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain(type: debug) { - latestCommits(docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + latestCommits(docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84") { cid links { cid diff --git a/tests/integration/explain/default/dagscan_test.go b/tests/integration/explain/default/dagscan_test.go index c18f365f82..321d3c3251 100644 --- a/tests/integration/explain/default/dagscan_test.go +++ b/tests/integration/explain/default/dagscan_test.go @@ -38,7 +38,7 @@ func TestDefaultExplainCommitsDagScanQueryOp(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain { - commits (docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + commits (docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84", fieldId: "1") { links { cid } @@ -56,8 +56,8 @@ func TestDefaultExplainCommitsDagScanQueryOp(t *testing.T) { "fieldId": "1", "spans": []dataMap{ { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/1", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/2", + "start": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84/1", + "end": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84/2", }, }, }, @@ -81,7 +81,7 @@ func TestDefaultExplainCommitsDagScanQueryOpWithoutField(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain { - commits (docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + commits (docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84") { links { cid } @@ -99,8 +99,8 @@ func TestDefaultExplainCommitsDagScanQueryOpWithoutField(t *testing.T) { "fieldId": nil, "spans": []dataMap{ { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df4", + "start": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84", + 
"end": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e85", }, }, }, @@ -124,7 +124,7 @@ func TestDefaultExplainLatestCommitsDagScanQueryOp(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain { - latestCommits(docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + latestCommits(docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84", fieldId: "1") { cid links { cid @@ -143,8 +143,8 @@ func TestDefaultExplainLatestCommitsDagScanQueryOp(t *testing.T) { "fieldId": "1", "spans": []dataMap{ { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/1", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/2", + "start": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84/1", + "end": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84/2", }, }, }, @@ -168,7 +168,7 @@ func TestDefaultExplainLatestCommitsDagScanQueryOpWithoutField(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain { - latestCommits(docID: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + latestCommits(docID: "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84") { cid links { cid @@ -187,8 +187,8 @@ func TestDefaultExplainLatestCommitsDagScanQueryOpWithoutField(t *testing.T) { "fieldId": "C", "spans": []dataMap{ { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/C", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/D", + "start": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84/C", + "end": "/bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84/D", }, }, }, diff --git a/tests/integration/explain/execute/dagscan_test.go b/tests/integration/explain/execute/dagscan_test.go index 3edc6e71f0..524e2a1204 100644 --- a/tests/integration/explain/execute/dagscan_test.go +++ b/tests/integration/explain/execute/dagscan_test.go @@ -30,7 +30,7 @@ func TestExecuteExplainCommitsDagScan(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain(type: execute) { - commits (docID: "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138") { + commits (docID: "bae-dcdbc1dc-8428-592d-ad9d-ca0f1430e1bf") { links { cid } @@ -75,7 +75,7 @@ func 
TestExecuteExplainLatestCommitsDagScan(t *testing.T) { testUtils.ExplainRequest{ Request: `query @explain(type: execute) { - latestCommits(docID: "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138") { + latestCommits(docID: "bae-dcdbc1dc-8428-592d-ad9d-ca0f1430e1bf") { cid links { cid diff --git a/tests/integration/explain/execute/delete_test.go b/tests/integration/explain/execute/delete_test.go index 75aa515c1a..99edd857a3 100644 --- a/tests/integration/explain/execute/delete_test.go +++ b/tests/integration/explain/execute/delete_test.go @@ -30,7 +30,7 @@ func TestExecuteExplainMutationRequestWithDeleteUsingID(t *testing.T) { testUtils.ExplainRequest{ Request: `mutation @explain(type: execute) { - delete_ContactAddress(docIDs: ["bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692"]) { + delete_ContactAddress(docIDs: ["bae-49f715e7-7f01-5509-a213-ed98cb81583f"]) { city } }`, diff --git a/tests/integration/explain/execute/fixture.go b/tests/integration/explain/execute/fixture.go index 7de5e6a959..17a172552f 100644 --- a/tests/integration/explain/execute/fixture.go +++ b/tests/integration/explain/execute/fixture.go @@ -20,28 +20,27 @@ func create3ArticleDocuments() []testUtils.CreateDoc { return []testUtils.CreateDoc{ { CollectionID: 0, - Doc: `{ - - "name": "After Guantánamo, Another Injustice", - "pages": 100, - "author_id": "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138" - }`, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "pages": 100, + "author_id": testUtils.NewDocIndex(2, 0), + }, }, { CollectionID: 0, - Doc: `{ - "name": "To my dear readers", - "pages": 200, - "author_id": "bae-68cb395d-df73-5bcb-b623-615a140dee12" - }`, + DocMap: map[string]any{ + "name": "To my dear readers", + "pages": 200, + "author_id": testUtils.NewDocIndex(2, 1), + }, }, { CollectionID: 0, - Doc: `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "pages": 300, - "author_id": "bae-68cb395d-df73-5bcb-b623-615a140dee12" - }`, + DocMap: map[string]any{ + "name": "Twinklestar's 
Favourite Xmas Cookie", + "pages": 300, + "author_id": testUtils.NewDocIndex(2, 1), + }, }, } } @@ -50,29 +49,29 @@ func create3BookDocuments() []testUtils.CreateDoc { return []testUtils.CreateDoc{ { CollectionID: 1, - Doc: `{ - "name": "Painted House", - "pages": 78, - "chapterPages": [1, 22, 33, 44, 55, 66], - "author_id": "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "pages": 78, + "chapterPages": []int64{1, 22, 33, 44, 55, 66}, + "author_id": testUtils.NewDocIndex(2, 0), + }, }, { CollectionID: 1, - Doc: `{ - "name": "A Time for Mercy", - "pages": 333, - "chapterPages": [0, 22, 101, 321], - "author_id": "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138" - }`, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "pages": 333, + "chapterPages": []int64{0, 22, 101, 321}, + "author_id": testUtils.NewDocIndex(2, 0), + }, }, { CollectionID: 1, - Doc: `{ - "name": "Theif Lord", - "pages": 20, - "author_id": "bae-68cb395d-df73-5bcb-b623-615a140dee12" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "pages": 20, + "author_id": testUtils.NewDocIndex(2, 1), + }, }, } } @@ -86,7 +85,7 @@ func create2AuthorDocuments() []testUtils.CreateDoc { "name": "John Grisham", "age": 65, "verified": true, - "contact_id": "bae-4db5359b-7dbe-5778-b96f-d71d1e6d0871" + "contact_id": "bae-819c9c03-9d49-5fd5-aaee-0dc5a70bbe44" }`, }, { @@ -96,7 +95,7 @@ func create2AuthorDocuments() []testUtils.CreateDoc { "name": "Cornelia Funke", "age": 62, "verified": false, - "contact_id": "bae-1f19fc5d-de4d-59a5-bbde-492be1757d65" + "contact_id": "bae-9bf0272a-c521-5bef-a7ba-642e8be6e433" }`, }, } @@ -107,21 +106,21 @@ func create2AuthorContactDocuments() []testUtils.CreateDoc { { CollectionID: 3, // "author_id": "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138" - // _docID: "bae-4db5359b-7dbe-5778-b96f-d71d1e6d0871" + // _docID: "bae-819c9c03-9d49-5fd5-aaee-0dc5a70bbe44" Doc: `{ "cell": "5197212301", "email": "john_grisham@example.com", - 
"address_id": "bae-c8448e47-6cd1-571f-90bd-364acb80da7b" + "address_id": "bae-14f20db7-3654-58de-9156-596ef2cfd790" }`, }, { CollectionID: 3, // "author_id": "bae-68cb395d-df73-5bcb-b623-615a140dee12", - // _docID: "bae-1f19fc5d-de4d-59a5-bbde-492be1757d65" + // _docID: "bae-9bf0272a-c521-5bef-a7ba-642e8be6e433" Doc: `{ "cell": "5197212302", "email": "cornelia_funke@example.com", - "address_id": "bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692" + "address_id": "bae-49f715e7-7f01-5509-a213-ed98cb81583f" }`, }, } @@ -131,8 +130,7 @@ func create2AddressDocuments() []testUtils.CreateDoc { return []testUtils.CreateDoc{ { CollectionID: 4, - // "contact_id": "bae-4db5359b-7dbe-5778-b96f-d71d1e6d0871" - // _docID: bae-c8448e47-6cd1-571f-90bd-364acb80da7b + // _docID: bae-14f20db7-3654-58de-9156-596ef2cfd790 Doc: `{ "city": "Waterloo", "country": "Canada" @@ -140,8 +138,7 @@ func create2AddressDocuments() []testUtils.CreateDoc { }, { CollectionID: 4, - // "contact_id": ""bae-1f19fc5d-de4d-59a5-bbde-492be1757d65"" - // _docID: bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692 + // _docID: bae-49f715e7-7f01-5509-a213-ed98cb81583f Doc: `{ "city": "Brampton", "country": "Canada" diff --git a/tests/integration/explain/execute/query_deleted_docs_test.go b/tests/integration/explain/execute/query_deleted_docs_test.go index 7872eb4847..77a3b3708a 100644 --- a/tests/integration/explain/execute/query_deleted_docs_test.go +++ b/tests/integration/explain/execute/query_deleted_docs_test.go @@ -26,12 +26,12 @@ func TestExecuteExplainQueryDeletedDocs(t *testing.T) { create2AddressDocuments(), testUtils.Request{ Request: `mutation { - delete_ContactAddress(docIDs: ["bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692"]) { + delete_ContactAddress(docIDs: ["bae-49f715e7-7f01-5509-a213-ed98cb81583f"]) { _docID } }`, Results: []map[string]any{ - {"_docID": "bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692"}, + {"_docID": "bae-49f715e7-7f01-5509-a213-ed98cb81583f"}, }, }, testUtils.ExplainRequest{ diff --git 
a/tests/integration/explain/execute/type_join_test.go b/tests/integration/explain/execute/type_join_test.go index eb1e187485..ab89890f02 100644 --- a/tests/integration/explain/execute/type_join_test.go +++ b/tests/integration/explain/execute/type_join_test.go @@ -170,14 +170,9 @@ func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - // Authors - create2AuthorDocuments(), - - // Contacts - create2AuthorContactDocuments(), - - // Addresses create2AddressDocuments(), + create2AuthorContactDocuments(), + create2AuthorDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { diff --git a/tests/integration/explain/execute/update_test.go b/tests/integration/explain/execute/update_test.go index 4f7a262136..0eaf8cbb22 100644 --- a/tests/integration/explain/execute/update_test.go +++ b/tests/integration/explain/execute/update_test.go @@ -32,8 +32,8 @@ func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { Request: `mutation @explain(type: execute) { update_ContactAddress( docIDs: [ - "bae-c8448e47-6cd1-571f-90bd-364acb80da7b", - "bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692" + "bae-14f20db7-3654-58de-9156-596ef2cfd790", + "bae-49f715e7-7f01-5509-a213-ed98cb81583f" ], input: {country: "USA"} ) { diff --git a/tests/integration/explain/execute/with_average_test.go b/tests/integration/explain/execute/with_average_test.go index 9e906c475d..33a238eb68 100644 --- a/tests/integration/explain/execute/with_average_test.go +++ b/tests/integration/explain/execute/with_average_test.go @@ -24,8 +24,7 @@ func TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - - // Books + create2AuthorDocuments(), create3BookDocuments(), testUtils.ExplainRequest{ @@ -80,12 +79,8 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - - // Books - create3BookDocuments(), - - 
// Authors create2AuthorDocuments(), + create3BookDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { diff --git a/tests/integration/explain/execute/with_count_test.go b/tests/integration/explain/execute/with_count_test.go index 43ff3d13df..23858e49d2 100644 --- a/tests/integration/explain/execute/with_count_test.go +++ b/tests/integration/explain/execute/with_count_test.go @@ -25,11 +25,8 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - // Books - create3BookDocuments(), - - // Authors create2AuthorDocuments(), + create3BookDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { diff --git a/tests/integration/explain/execute/with_limit_test.go b/tests/integration/explain/execute/with_limit_test.go index 88a1666ca3..cc703c9082 100644 --- a/tests/integration/explain/execute/with_limit_test.go +++ b/tests/integration/explain/execute/with_limit_test.go @@ -24,8 +24,7 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParent(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - - // Books + create2AuthorDocuments(), create3BookDocuments(), testUtils.ExplainRequest{ @@ -74,11 +73,8 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t Actions: []any{ explainUtils.SchemaForExplainTests, - // Articles - create3ArticleDocuments(), - - // Authors create2AuthorDocuments(), + create3ArticleDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { @@ -112,8 +108,8 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t }, "subTypeScanNode": dataMap{ "iterations": uint64(2), - "docFetches": uint64(4), - "fieldFetches": uint64(6), + "docFetches": uint64(3), + "fieldFetches": uint64(5), "indexFetches": uint64(0), }, }, diff --git a/tests/integration/explain/execute/with_order_test.go b/tests/integration/explain/execute/with_order_test.go index 
9155523b20..a2441b9cd5 100644 --- a/tests/integration/explain/execute/with_order_test.go +++ b/tests/integration/explain/execute/with_order_test.go @@ -159,11 +159,8 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - // Articles - create3ArticleDocuments(), - - // Authors create2AuthorDocuments(), + create3ArticleDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { @@ -220,11 +217,8 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - // Articles - create3ArticleDocuments(), - - // Authors create2AuthorDocuments(), + create3ArticleDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { @@ -285,11 +279,8 @@ func TestExecuteExplainRequestWhereParentFieldIsOrderedByChildField(t *testing.T Actions: []any{ explainUtils.SchemaForExplainTests, - // Articles - create3ArticleDocuments(), - - // Authors create2AuthorDocuments(), + create3ArticleDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { diff --git a/tests/integration/explain/execute/with_sum_test.go b/tests/integration/explain/execute/with_sum_test.go index c37e3d0309..e9e3462045 100644 --- a/tests/integration/explain/execute/with_sum_test.go +++ b/tests/integration/explain/execute/with_sum_test.go @@ -24,8 +24,7 @@ func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - - // Books + create2AuthorDocuments(), create3BookDocuments(), testUtils.ExplainRequest{ @@ -74,12 +73,8 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { Actions: []any{ explainUtils.SchemaForExplainTests, - - // Articles - create3ArticleDocuments(), - - // Authors create2AuthorDocuments(), + create3ArticleDocuments(), testUtils.ExplainRequest{ Request: `query @explain(type: execute) { diff --git 
a/tests/integration/index/create_drop_test.go b/tests/integration/index/create_drop_test.go index 0680ea7aed..a9e55f0322 100644 --- a/tests/integration/index/create_drop_test.go +++ b/tests/integration/index/create_drop_test.go @@ -30,7 +30,7 @@ func TestIndexDrop_ShouldNotHinderQuerying(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + // bae-d4303725-7db9-53d2-b324-f3ee44020e52 Doc: ` { "name": "John", diff --git a/tests/integration/index/create_test.go b/tests/integration/index/create_test.go index d37dfd07f7..9afb8ea333 100644 --- a/tests/integration/index/create_test.go +++ b/tests/integration/index/create_test.go @@ -31,7 +31,7 @@ func TestIndexCreateWithCollection_ShouldNotHinderQuerying(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + // bae-d4303725-7db9-53d2-b324-f3ee44020e52 Doc: ` { "name": "John", @@ -73,7 +73,7 @@ func TestIndexCreate_ShouldNotHinderQuerying(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + // bae-d4303725-7db9-53d2-b324-f3ee44020e52 Doc: ` { "name": "John", diff --git a/tests/integration/index/create_unique_composite_test.go b/tests/integration/index/create_unique_composite_test.go index f115668068..9adb6d2e67 100644 --- a/tests/integration/index/create_unique_composite_test.go +++ b/tests/integration/index/create_unique_composite_test.go @@ -55,7 +55,7 @@ func TestCreateUniqueCompositeIndex_IfFieldValuesAreNotUnique_ReturnError(t *tes Fields: []testUtils.IndexedField{{Name: "name"}, {Name: "age"}}, Unique: true, ExpectedError: db.NewErrCanNotIndexNonUniqueFields( - "bae-cae3deac-d371-5a1f-93b4-ede69042f79b", + "bae-c20024f0-bd72-56c2-85d5-865d3aa270b7", errors.NewKV("name", "John"), errors.NewKV("age", 21), ).Error(), }, @@ -100,7 +100,7 @@ func TestUniqueCompositeIndexCreate_UponAddingDocWithExistingFieldValue_ReturnEr "email": "another@gmail.com" }`, ExpectedError: 
db.NewErrCanNotIndexNonUniqueFields( - "bae-13254430-7e9e-52e2-9861-9a7ec7a75c8d", + "bae-4da27b71-f735-59f6-b6b8-ea0fa181e3e3", errors.NewKV("name", "John"), errors.NewKV("age", 21)).Error(), }, }, diff --git a/tests/integration/index/create_unique_test.go b/tests/integration/index/create_unique_test.go index 3d723f5d55..36488ecaab 100644 --- a/tests/integration/index/create_unique_test.go +++ b/tests/integration/index/create_unique_test.go @@ -19,7 +19,7 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration" ) -const johnDocID = "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7" +const johnDocID = "bae-774fbeea-813b-52c8-82b0-d08515a075d7" func TestCreateUniqueIndex_IfFieldValuesAreNotUnique_ReturnError(t *testing.T) { test := testUtils.TestCase{ diff --git a/tests/integration/index/drop_test.go b/tests/integration/index/drop_test.go index 96e136c332..4639bfa756 100644 --- a/tests/integration/index/drop_test.go +++ b/tests/integration/index/drop_test.go @@ -30,7 +30,7 @@ func TestIndexDrop_IfIndexDoesNotExist_ReturnError(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + // bae-d4303725-7db9-53d2-b324-f3ee44020e52 Doc: ` { "name": "John", diff --git a/tests/integration/index/query_with_composite_index_only_filter_test.go b/tests/integration/index/query_with_composite_index_only_filter_test.go index bf7e8b17c3..ccb2a43e66 100644 --- a/tests/integration/index/query_with_composite_index_only_filter_test.go +++ b/tests/integration/index/query_with_composite_index_only_filter_test.go @@ -909,8 +909,8 @@ func TestQueryWithCompositeIndex_IfConsecutiveEqOps_ShouldUseAllToOptimizeQuery( Request: reqWithName, Results: []map[string]any{ {"about": "bob3"}, - {"about": "bob1"}, {"about": "bob2"}, + {"about": "bob1"}, {"about": "bob4"}, }, }, @@ -922,8 +922,8 @@ func TestQueryWithCompositeIndex_IfConsecutiveEqOps_ShouldUseAllToOptimizeQuery( Request: reqWithNameAge, Results: []map[string]any{ {"about": "bob3"}, 
- {"about": "bob1"}, {"about": "bob2"}, + {"about": "bob1"}, }, }, testUtils.Request{ @@ -933,8 +933,8 @@ func TestQueryWithCompositeIndex_IfConsecutiveEqOps_ShouldUseAllToOptimizeQuery( testUtils.Request{ Request: reqWithNameAgeNumChildren, Results: []map[string]any{ - {"about": "bob1"}, {"about": "bob2"}, + {"about": "bob1"}, }, }, testUtils.Request{ diff --git a/tests/integration/index/query_with_compound_filter_relation_test.go b/tests/integration/index/query_with_compound_filter_relation_test.go index 31667d8bc7..8f74411536 100644 --- a/tests/integration/index/query_with_compound_filter_relation_test.go +++ b/tests/integration/index/query_with_compound_filter_relation_test.go @@ -189,10 +189,10 @@ func TestIndex_QueryWithIndexOnOneToManyRelationAndFilter_Data(t *testing.T) { }`, Results: []map[string]any{ { - "name": "DefraDB", + "name": "LensVM", }, { - "name": "LensVM", + "name": "DefraDB", }, }, }, @@ -274,10 +274,10 @@ func TestIndex_QueryWithIndexOnOneToManyRelationOrFilter_Data(t *testing.T) { "name": "Zanzi", }, { - "name": "DefraDB", + "name": "LensVM", }, { - "name": "LensVM", + "name": "DefraDB", }, }, }, @@ -348,10 +348,10 @@ func TestIndex_QueryWithIndexOnOneToManyRelationNotFilter_Data(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Horizon", + "name": "Zanzi", }, { - "name": "Zanzi", + "name": "Horizon", }, }, }, diff --git a/tests/integration/index/query_with_relation_filter_test.go b/tests/integration/index/query_with_relation_filter_test.go index 94160a5e3c..ef4ec989ca 100644 --- a/tests/integration/index/query_with_relation_filter_test.go +++ b/tests/integration/index/query_with_relation_filter_test.go @@ -194,8 +194,8 @@ func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_Sh Request: req2, Results: []map[string]any{ {"name": "Shahzad"}, - {"name": "Fred"}, {"name": "John"}, + {"name": "Fred"}, }, }, testUtils.Request{ @@ -261,9 +261,9 @@ func 
TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio testUtils.Request{ Request: req2, Results: []map[string]any{ + {"name": "Shahzad"}, {"name": "John"}, {"name": "Fred"}, - {"name": "Shahzad"}, }, }, testUtils.Request{ @@ -332,9 +332,9 @@ func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelatio testUtils.Request{ Request: req2, Results: []map[string]any{ + {"name": "Shahzad"}, {"name": "John"}, {"name": "Fred"}, - {"name": "Shahzad"}, }, }, testUtils.Request{ @@ -427,27 +427,27 @@ func TestQueryWithIndexOnOneToMany_IfFilterOnIndexedRelation_ShouldFilter(t *tes }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "model": "Walkman", + DocMap: map[string]any{ + "model": "Walkman", "manufacturer": "Sony", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "model": "Walkman", + DocMap: map[string]any{ + "model": "Walkman", "manufacturer": "The Proclaimers", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "model": "Running Man", + DocMap: map[string]any{ + "model": "Running Man", "manufacturer": "Braveworld Productions", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -467,11 +467,11 @@ func TestQueryWithIndexOnOneToMany_IfFilterOnIndexedRelation_ShouldFilter(t *tes "devices": []map[string]any{ { "model": "Walkman", - "manufacturer": "Sony", + "manufacturer": "The Proclaimers", }, { "model": "Walkman", - "manufacturer": "The Proclaimers", + "manufacturer": "Sony", }, // The filter is on User, so all devices belonging to it will be returned { @@ -525,27 +525,27 @@ func TestQueryWithIndexOnOneToMany_IfFilterOnIndexedRelation_ShouldFilterWithExp }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "model": 
"Walkman", + DocMap: map[string]any{ + "model": "Walkman", "manufacturer": "Sony", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "model": "Walkman", + DocMap: map[string]any{ + "model": "Walkman", "manufacturer": "The Proclaimers", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "model": "Running Man", + DocMap: map[string]any{ + "model": "Running Man", "manufacturer": "Braveworld Productions", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: req, @@ -555,11 +555,11 @@ func TestQueryWithIndexOnOneToMany_IfFilterOnIndexedRelation_ShouldFilterWithExp "devices": []map[string]any{ { "model": "Walkman", - "manufacturer": "Sony", + "manufacturer": "The Proclaimers", }, { "model": "Walkman", - "manufacturer": "The Proclaimers", + "manufacturer": "Sony", }, { "model": "Running Man", @@ -670,19 +670,19 @@ func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedField_ShouldFilterWithExplai { "model": "Playstation 5", "owner": map[string]any{ - "name": "Islam", + "name": "Addo", }, }, { - "model": "Playstation 5", + "model": "iPhone 10", "owner": map[string]any{ "name": "Addo", }, }, { - "model": "iPhone 10", + "model": "Playstation 5", "owner": map[string]any{ - "name": "Addo", + "name": "Islam", }, }, }, @@ -732,9 +732,9 @@ func TestQueryWithIndexOnManyToOne_IfFilterOnIndexedRelation_ShouldFilterWithExp testUtils.Request{ Request: req, Results: []map[string]any{ - {"model": "iPhone 13"}, - {"model": "iPad Mini"}, {"model": "MacBook Pro"}, + {"model": "iPad Mini"}, + {"model": "iPhone 13"}, }, }, testUtils.Request{ @@ -783,19 +783,19 @@ func TestQueryWithIndexOnOneToMany_IfIndexedRelationIsNil_NeNilFilterShouldUseIn }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - 
"model": "Walkman", + DocMap: map[string]any{ + "model": "Walkman", "manufacturer": "Sony", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "model": "iPhone", + DocMap: map[string]any{ + "model": "iPhone", "manufacturer": "Apple", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" - }`, + "owner": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, @@ -814,8 +814,8 @@ func TestQueryWithIndexOnOneToMany_IfIndexedRelationIsNil_NeNilFilterShouldUseIn testUtils.Request{ Request: req, Results: []map[string]any{ - {"model": "iPhone"}, {"model": "Walkman"}, + {"model": "iPhone"}, }, }, testUtils.Request{ @@ -867,7 +867,7 @@ func TestQueryWithIndexOnOneToMany_IfIndexedRelationIsNil_EqNilFilterShouldUseIn Doc: `{ "model": "Walkman", "manufacturer": "Sony", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" + "owner": "bae-5622129c-b893-5768-a3f4-8f745db4cc04" }`, }, testUtils.CreateDoc{ @@ -875,7 +875,7 @@ func TestQueryWithIndexOnOneToMany_IfIndexedRelationIsNil_EqNilFilterShouldUseIn Doc: `{ "model": "iPhone", "manufacturer": "Apple", - "owner": "bae-403d7337-f73e-5c81-8719-e853938c8985" + "owner": "bae-5622129c-b893-5768-a3f4-8f745db4cc04" }`, }, testUtils.CreateDoc{ @@ -895,8 +895,8 @@ func TestQueryWithIndexOnOneToMany_IfIndexedRelationIsNil_EqNilFilterShouldUseIn testUtils.Request{ Request: req, Results: []map[string]any{ - {"model": "Running Man"}, {"model": "PlayStation 5"}, + {"model": "Running Man"}, }, }, testUtils.Request{ diff --git a/tests/integration/index/query_with_unique_composite_index_filter_test.go b/tests/integration/index/query_with_unique_composite_index_filter_test.go index 52712bc181..26f35a0165 100644 --- a/tests/integration/index/query_with_unique_composite_index_filter_test.go +++ b/tests/integration/index/query_with_unique_composite_index_filter_test.go @@ -1037,8 +1037,8 @@ func 
TestQueryWithUniqueCompositeIndex_WithMultipleNilOnSecondFieldsAndNilFilter testUtils.Request{ Request: req, Results: []map[string]any{ - {"name": "Bob", "age": nil, "email": "bob2@gmail.com"}, {"name": "Bob", "age": nil, "email": "bob1@gmail.com"}, + {"name": "Bob", "age": nil, "email": "bob2@gmail.com"}, }, }, testUtils.Request{ @@ -1135,8 +1135,8 @@ func TestQueryWithUniqueCompositeIndex_WithMultipleNilOnBothFieldsAndNilFilter_S } }`, Results: []map[string]any{ - {"about": "nil_nil_2"}, {"about": "bob_nil"}, + {"about": "nil_nil_2"}, {"about": "nil_nil_1"}, }, }, @@ -1269,10 +1269,10 @@ func TestQueryWithUniqueCompositeIndex_AfterUpdateOnNilFields_ShouldFetch(t *tes } }`, Results: []map[string]any{ - {"about": "bob_nil -> nil_nil"}, - {"about": "nil_nil -> bob_nil"}, {"about": "bob_22 -> bob_nil"}, {"about": "nil_22 -> bob_nil"}, + {"about": "bob_nil -> nil_nil"}, + {"about": "nil_nil -> bob_nil"}, }, }, }, diff --git a/tests/integration/index/query_with_unique_index_only_filter_test.go b/tests/integration/index/query_with_unique_index_only_filter_test.go index 23563335d4..f21de4630c 100644 --- a/tests/integration/index/query_with_unique_index_only_filter_test.go +++ b/tests/integration/index/query_with_unique_index_only_filter_test.go @@ -727,8 +727,8 @@ func TestQueryWithUniqueIndex_WithMultipleNilValuesAndEqualFilter_ShouldFetch(t } }`, Results: []map[string]any{ - {"name": "Bob"}, {"name": "Alice"}, + {"name": "Bob"}, }, }, }, diff --git a/tests/integration/mutation/create/crdt/pcounter_test.go b/tests/integration/mutation/create/crdt/pcounter_test.go index 681ca2ec76..e54dba4ea6 100644 --- a/tests/integration/mutation/create/crdt/pcounter_test.go +++ b/tests/integration/mutation/create/crdt/pcounter_test.go @@ -44,7 +44,7 @@ func TestPCounterCreate_IntKindWithPositiveValue_NoError(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-a688789e-d8a6-57a7-be09-22e005ab79e0", + "_docID": "bae-d8cb53d4-ac5a-5c55-8306-64df633d400d", "name": "John", 
"points": int64(10), }, diff --git a/tests/integration/mutation/create/crdt/pncounter_test.go b/tests/integration/mutation/create/crdt/pncounter_test.go index 2d445bff80..b8808ff40a 100644 --- a/tests/integration/mutation/create/crdt/pncounter_test.go +++ b/tests/integration/mutation/create/crdt/pncounter_test.go @@ -44,7 +44,7 @@ func TestPNCounterCreate_IntKindWithPositiveValue_NoError(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-a688789e-d8a6-57a7-be09-22e005ab79e0", + "_docID": "bae-bc5464e4-26a6-5307-b516-aada0abeb089", "name": "John", "points": int64(10), }, diff --git a/tests/integration/mutation/create/field_kinds/field_kind_json_test.go b/tests/integration/mutation/create/field_kinds/field_kind_json_test.go index 69b1075464..8833a74b31 100644 --- a/tests/integration/mutation/create/field_kinds/field_kind_json_test.go +++ b/tests/integration/mutation/create/field_kinds/field_kind_json_test.go @@ -38,7 +38,7 @@ func TestMutationCreate_WithJSONFieldGivenValidJSON_NoError(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-b2dff82c-ab26-5d06-a29a-02aa4807dde2", + "_docID": "bae-84ae4ef8-ca0c-5f32-bc85-cee97e731bc0", "custom": "{\"tree\":\"maple\",\"age\":250}", "name": "John", }, diff --git a/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go b/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go index 43275f8404..3bddea1a4a 100644 --- a/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go +++ b/tests/integration/mutation/create/field_kinds/one_to_many/with_alias_test.go @@ -31,7 +31,7 @@ func TestMutationCreateOneToMany_AliasedRelationNameWithInvalidField_Error(t *te testUtils.CreateDoc{ Doc: `{ "notName": "Painted House", - "author": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "author": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "The given field does not exist. 
Name: notName", }, @@ -54,7 +54,7 @@ func TestMutationCreateOneToMany_AliasedRelationNameNonExistingRelationSingleSid CollectionID: 0, Doc: `{ "name": "John Grisham", - "published": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "The given field does not exist. Name: published", }, @@ -73,7 +73,7 @@ func TestMutationCreateOneToMany_AliasedRelationNameNonExistingRelationManySide_ CollectionID: 0, Doc: `{ "name": "Painted House", - "author": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "author": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, testUtils.Request{ @@ -183,7 +183,7 @@ func TestMutationCreateOneToMany_AliasedRelationNameToLinkFromManySide(t *testin func TestMutationUpdateOneToMany_AliasRelationNameAndInternalIDBothProduceSameDocID(t *testing.T) { // These IDs MUST be shared by both tests below. - bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + bookID := "bae-e4888569-d423-58b7-94c5-5886e3cffe22" nonAliasedTest := testUtils.TestCase{ Description: "One to many update mutation using relation alias name from single side (wrong)", diff --git a/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go b/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go index 3d15e52323..56efed2216 100644 --- a/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go +++ b/tests/integration/mutation/create/field_kinds/one_to_many/with_simple_test.go @@ -31,7 +31,7 @@ func TestMutationCreateOneToMany_WithInvalidField_Error(t *testing.T) { testUtils.CreateDoc{ Doc: `{ "notName": "Painted House", - "author_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "author_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "The given field does not exist. 
Name: notName", }, @@ -54,7 +54,7 @@ func TestMutationCreateOneToMany_NonExistingRelationSingleSide_NoIDFieldError(t CollectionID: 0, Doc: `{ "name": "John Grisham", - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "The given field does not exist. Name: published_id", }, @@ -73,7 +73,7 @@ func TestMutationCreateOneToMany_NonExistingRelationManySide_CreatedDoc(t *testi CollectionID: 0, Doc: `{ "name": "Painted House", - "author_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "author_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, testUtils.Request{ diff --git a/tests/integration/mutation/create/field_kinds/one_to_one/with_alias_test.go b/tests/integration/mutation/create/field_kinds/one_to_one/with_alias_test.go index 16da55ce78..ef263bd47e 100644 --- a/tests/integration/mutation/create/field_kinds/one_to_one/with_alias_test.go +++ b/tests/integration/mutation/create/field_kinds/one_to_one/with_alias_test.go @@ -11,7 +11,6 @@ package one_to_one import ( - "fmt" "testing" "github.com/sourcenetwork/immutable" @@ -33,7 +32,7 @@ func TestMutationCreateOneToOne_UseAliasWithInvalidField_Error(t *testing.T) { CollectionID: 1, Doc: `{ "notName": "John Grisham", - "published": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "The given field does not exist. 
Name: notName", }, @@ -52,7 +51,7 @@ func TestMutationCreateOneToOne_UseAliasWithNonExistingRelationPrimarySide_Creat CollectionID: 1, Doc: `{ "name": "John Grisham", - "published": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, testUtils.Request{ @@ -80,7 +79,7 @@ func TestMutationCreateOneToOne_UseAliasWithNonExistingRelationSecondarySide_Err CollectionID: 0, Doc: `{ "name": "Painted House", - "author": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "author": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "document not found or not authorized to access", }, @@ -90,8 +89,6 @@ func TestMutationCreateOneToOne_UseAliasWithNonExistingRelationSecondarySide_Err } func TestMutationCreateOneToOne_UseAliasedRelationNameToLink_QueryFromPrimarySide(t *testing.T) { - bookID := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - test := testUtils.TestCase{ Description: "One to one create mutation with an alias relation.", Actions: []any{ @@ -103,13 +100,10 @@ func TestMutationCreateOneToOne_UseAliasedRelationNameToLink_QueryFromPrimarySid }, testUtils.CreateDoc{ CollectionID: 1, - Doc: fmt.Sprintf( - `{ - "name": "John Grisham", - "published": "%s" - }`, - bookID, - ), + DocMap: map[string]any{ + "name": "John Grisham", + "published": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -154,8 +148,6 @@ func TestMutationCreateOneToOne_UseAliasedRelationNameToLink_QueryFromPrimarySid } func TestMutationCreateOneToOne_UseAliasedRelationNameToLink_QueryFromSecondarySide(t *testing.T) { - authorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - test := testUtils.TestCase{ Description: "One to one create mutation from secondary side with alias relation.", Actions: []any{ @@ -167,13 +159,10 @@ func TestMutationCreateOneToOne_UseAliasedRelationNameToLink_QueryFromSecondaryS }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf( - `{ - "name": "Painted House", - "author": "%s" - }`, - 
authorID, - ), + DocMap: map[string]any{ + "name": "Painted House", + "author": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/mutation/create/field_kinds/one_to_one/with_simple_test.go b/tests/integration/mutation/create/field_kinds/one_to_one/with_simple_test.go index c693b05187..fa890f8e70 100644 --- a/tests/integration/mutation/create/field_kinds/one_to_one/with_simple_test.go +++ b/tests/integration/mutation/create/field_kinds/one_to_one/with_simple_test.go @@ -11,7 +11,6 @@ package one_to_one import ( - "fmt" "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" @@ -33,7 +32,7 @@ func TestMutationCreateOneToOne_WithInvalidField_Error(t *testing.T) { CollectionID: 1, Doc: `{ "notName": "John Grisham", - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "The given field does not exist. Name: notName", }, @@ -52,7 +51,7 @@ func TestMutationCreateOneToOneNoChild(t *testing.T) { CollectionID: 1, Doc: `{ "name": "John Grisham", - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, testUtils.Request{ @@ -80,7 +79,7 @@ func TestMutationCreateOneToOne_NonExistingRelationSecondarySide_Error(t *testin CollectionID: 0, Doc: `{ "name": "Painted House", - "author_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "author_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, ExpectedError: "document not found or not authorized to access", }, @@ -90,8 +89,6 @@ func TestMutationCreateOneToOne_NonExistingRelationSecondarySide_Error(t *testin } func TestMutationCreateOneToOne(t *testing.T) { - bookID := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - test := testUtils.TestCase{ Description: "One to one create mutation", Actions: []any{ @@ -103,13 +100,10 @@ func TestMutationCreateOneToOne(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - 
Doc: fmt.Sprintf( - `{ - "name": "John Grisham", - "published_id": "%s" - }`, - bookID, - ), + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: ` @@ -156,8 +150,6 @@ func TestMutationCreateOneToOne(t *testing.T) { } func TestMutationCreateOneToOneSecondarySide(t *testing.T) { - authorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - test := testUtils.TestCase{ Description: "One to one create mutation from secondary side", Actions: []any{ @@ -169,13 +161,10 @@ func TestMutationCreateOneToOneSecondarySide(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf( - `{ - "name": "Painted House", - "author_id": "%s" - }`, - authorID, - ), + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: ` @@ -222,8 +211,6 @@ func TestMutationCreateOneToOneSecondarySide(t *testing.T) { } func TestMutationCreateOneToOne_ErrorsGivenRelationAlreadyEstablishedViaPrimary(t *testing.T) { - bookID := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - test := testUtils.TestCase{ Description: "One to one create mutation, errors due to link already existing, primary side", Actions: []any{ @@ -235,21 +222,17 @@ func TestMutationCreateOneToOne_ErrorsGivenRelationAlreadyEstablishedViaPrimary( }, testUtils.CreateDoc{ CollectionID: 1, - Doc: fmt.Sprintf(`{ - "name": "John Grisham", - "published_id": "%s" - }`, - bookID, - ), + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: fmt.Sprintf(`{ - "name": "Saadi Shirazi", - "published_id": "%s" - }`, - bookID, - ), + DocMap: map[string]any{ + "name": "Saadi Shirazi", + "published_id": testUtils.NewDocIndex(0, 0), + }, ExpectedError: "target document is already linked to another document.", }, }, @@ -259,8 +242,6 @@ func 
TestMutationCreateOneToOne_ErrorsGivenRelationAlreadyEstablishedViaPrimary( } func TestMutationCreateOneToOne_ErrorsGivenRelationAlreadyEstablishedViaSecondary(t *testing.T) { - authorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - test := testUtils.TestCase{ Description: "One to one create mutation, errors due to link already existing, secondary side", Actions: []any{ @@ -272,21 +253,17 @@ func TestMutationCreateOneToOne_ErrorsGivenRelationAlreadyEstablishedViaSecondar }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf(`{ - "name": "Painted House", - "author_id": "%s" - }`, - authorID, - ), + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 0, - Doc: fmt.Sprintf(`{ - "name": "Golestan", - "author_id": "%s" - }`, - authorID, - ), + DocMap: map[string]any{ + "name": "Golestan", + "author_id": testUtils.NewDocIndex(1, 0), + }, ExpectedError: "target document is already linked to another document.", }, }, diff --git a/tests/integration/mutation/create/field_kinds/one_to_one_to_one/with_txn_test.go b/tests/integration/mutation/create/field_kinds/one_to_one_to_one/with_txn_test.go index 64272779d3..3b40c19f6c 100644 --- a/tests/integration/mutation/create/field_kinds/one_to_one_to_one/with_txn_test.go +++ b/tests/integration/mutation/create/field_kinds/one_to_one_to_one/with_txn_test.go @@ -24,7 +24,7 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. Actions: []any{ testUtils.CreateDoc{ CollectionID: 2, - // "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + // "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", Doc: `{ "name": "Website", "address": "Manning Publications" @@ -32,7 +32,7 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. 
}, testUtils.CreateDoc{ CollectionID: 2, - // "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + // "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", Doc: `{ "name": "Online", "address": "Manning Early Access Program (MEAP)" @@ -42,26 +42,26 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. testUtils.Request{ TransactionID: immutable.Some(0), Request: `mutation { - create_Book(input: {name: "Book By Website", rating: 4.0, publisher_id: "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4"}) { + create_Book(input: {name: "Book By Website", rating: 4.0, publisher_id: "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed"}) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", }, }, }, testUtils.Request{ TransactionID: immutable.Some(1), Request: `mutation { - create_Book(input: {name: "Book By Online", rating: 4.0, publisher_id: "bae-8a381044-9206-51e7-8bc8-dc683d5f2523"}) { + create_Book(input: {name: "Book By Online", rating: 4.0, publisher_id: "bae-21084f46-b12a-53ab-94dd-04d075b4218c"}) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", }, }, }, @@ -80,16 +80,16 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. }`, Results: []map[string]any{ { - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", "published": map[string]any{ - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", "name": "Book By Website", }, }, { - "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", "name": "Online", "published": nil, }, @@ -110,16 +110,15 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. 
}`, Results: []map[string]any{ { - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", "published": nil, }, - { - "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", "name": "Online", "published": map[string]any{ - "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", "name": "Book By Online", }, }, @@ -146,20 +145,19 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", - "name": "Book By Website", + "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", + "name": "Book By Online", "publisher": map[string]any{ - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", - "name": "Website", + "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", + "name": "Online", }, }, - { - "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", - "name": "Book By Online", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", + "name": "Book By Website", "publisher": map[string]any{ - "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", - "name": "Online", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", + "name": "Website", }, }, }, @@ -176,7 +174,7 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing Actions: []any{ testUtils.CreateDoc{ CollectionID: 2, - // "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + // "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", Doc: `{ "name": "Website", "address": "Manning Publications" @@ -184,7 +182,7 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }, testUtils.CreateDoc{ CollectionID: 2, - // "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + // "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", Doc: `{ "name": "Online", "address": "Manning Early 
Access Program (MEAP)" @@ -194,26 +192,26 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing testUtils.Request{ TransactionID: immutable.Some(0), Request: `mutation { - create_Book(input: {name: "Book By Website", rating: 4.0, publisher_id: "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4"}) { + create_Book(input: {name: "Book By Website", rating: 4.0, publisher_id: "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed"}) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", }, }, }, testUtils.Request{ TransactionID: immutable.Some(1), Request: `mutation { - create_Book(input: {name: "Book By Online", rating: 4.0, publisher_id: "bae-8a381044-9206-51e7-8bc8-dc683d5f2523"}) { + create_Book(input: {name: "Book By Online", rating: 4.0, publisher_id: "bae-21084f46-b12a-53ab-94dd-04d075b4218c"}) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", }, }, }, @@ -232,10 +230,10 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", "name": "Book By Website", "publisher": map[string]any{ - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", }, }, @@ -256,10 +254,10 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }`, Results: []map[string]any{ { - "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", "name": "Book By Online", "publisher": map[string]any{ - "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", "name": "Online", }, }, @@ -286,19 +284,19 @@ func 
TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }`, Results: []map[string]any{ { - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", "published": map[string]any{ - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", "name": "Book By Website", }, }, { - "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", "name": "Online", "published": map[string]any{ - "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", "name": "Book By Online", }, }, diff --git a/tests/integration/mutation/create/simple_test.go b/tests/integration/mutation/create/simple_test.go index d095fdfc6d..61280c0cc8 100644 --- a/tests/integration/mutation/create/simple_test.go +++ b/tests/integration/mutation/create/simple_test.go @@ -89,7 +89,7 @@ func TestMutationCreate(t *testing.T) { `, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-8c89a573-c287-5d8c-8ba6-c47c814c594d", "name": "John", "age": int64(27), }, @@ -157,7 +157,7 @@ func TestMutationCreate_GivenEmptyInput(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-524bfa06-849c-5daf-b6df-05c2da80844d", + "_docID": "bae-332de69b-47da-5175-863f-2480107f4884", }, }, }, diff --git a/tests/integration/mutation/create/with_version_test.go b/tests/integration/mutation/create/with_version_test.go index 69f6d2b9f3..ea44ba6038 100644 --- a/tests/integration/mutation/create/with_version_test.go +++ b/tests/integration/mutation/create/with_version_test.go @@ -39,7 +39,7 @@ func TestMutationCreate_ReturnsVersionCID(t *testing.T) { { "_version": []map[string]any{ { - "cid": "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", + "cid": "bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", }, }, }, diff 
--git a/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go b/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go index 260a9a7b70..8b8e7088a6 100644 --- a/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go +++ b/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go @@ -11,12 +11,8 @@ package one_to_many import ( - "fmt" "testing" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -34,32 +30,6 @@ type Author { ` func TestDeletionOfADocumentUsingSingleDocIDWithShowDeletedDocumentQuery(t *testing.T) { - colDefMap, err := testUtils.ParseSDL(schemas) - require.NoError(t, err) - - jsonString1 := `{ - "name": "John", - "age": 30 - }` - doc1, err := client.NewDocFromJSON([]byte(jsonString1), colDefMap["Author"]) - require.NoError(t, err) - - jsonString2 := fmt.Sprintf(`{ - "name": "John and the philosopher are stoned", - "rating": 9.9, - "author_id": "%s" - }`, doc1.ID()) - doc2, err := client.NewDocFromJSON([]byte(jsonString2), colDefMap["Book"]) - require.NoError(t, err) - - jsonString3 := fmt.Sprintf(`{ - "name": "John has a chamber of secrets", - "rating": 9.9, - "author_id": "%s" - }`, doc1.ID()) - // doc3, err := client.NewDocFromJSON([]byte(jsonString1)) - // require.NoError(t, err) - test := testUtils.TestCase{ Description: "One to many delete document using single document id, show deleted.", Actions: []any{ @@ -68,25 +38,36 @@ func TestDeletionOfADocumentUsingSingleDocIDWithShowDeletedDocumentQuery(t *test }, testUtils.CreateDoc{ CollectionID: 1, - Doc: jsonString1, + Doc: `{ + "name": "John", + "age": 30 + }`, }, testUtils.CreateDoc{ CollectionID: 0, - Doc: jsonString2, + DocMap: map[string]any{ + "name": "John and the philosopher are stoned", + "rating": 9.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ 
CollectionID: 0, - Doc: jsonString3, + DocMap: map[string]any{ + "name": "John has a chamber of secrets", + "rating": 9.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ - Request: fmt.Sprintf(`mutation { - delete_Book(docID: "%s") { + Request: `mutation { + delete_Book(docID: "bae-b5c56d8f-b2f5-57f9-b371-4e9e04903e91") { _docID } - }`, doc2.ID()), + }`, Results: []map[string]any{ { - "_docID": doc2.ID().String(), + "_docID": "bae-b5c56d8f-b2f5-57f9-b371-4e9e04903e91", }, }, }, @@ -110,13 +91,13 @@ func TestDeletionOfADocumentUsingSingleDocIDWithShowDeletedDocumentQuery(t *test "age": int64(30), "published": []map[string]any{ { - "_deleted": true, - "name": "John and the philosopher are stoned", + "_deleted": false, + "name": "John has a chamber of secrets", "rating": 9.9, }, { - "_deleted": false, - "name": "John has a chamber of secrets", + "_deleted": true, + "name": "John and the philosopher are stoned", "rating": 9.9, }, }, diff --git a/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_id_test.go b/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_id_test.go index 2efa3ef960..057f20a00b 100644 --- a/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_id_test.go +++ b/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_id_test.go @@ -24,28 +24,28 @@ func TestRelationalDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { testUtils.CreateDoc{ // Books CollectionID: 0, - // bae-80eded16-ee4b-5c9d-b33f-6a7b83958af2 + // bae-8e8b2923-e167-5fd9-aee6-98267dd0ab40 Doc: `{ "name": "100 Go Mistakes to Avoid.", "rating": 4.8, - "publisher_id": "bae-176ebdf0-77e7-5b2f-91ae-f620e37a29e3" + "publisher_id": "bae-9c689bec-071e-5650-9378-bc11d5d3325c" }`, }, testUtils.CreateDoc{ // Authors CollectionID: 1, - // bae-2f80f359-535d-508e-ba58-088a309ce3c3 + // bae-455081f4-b810-5363-ab95-50dbd2ec03d0 Doc: `{ "name": "Teiva Harsanyi", "age": 48, "verified": true, - "wrote_id": 
"bae-80eded16-ee4b-5c9d-b33f-6a7b83958af2" + "wrote_id": "bae-8e8b2923-e167-5fd9-aee6-98267dd0ab40" }`, }, testUtils.CreateDoc{ // Publishers CollectionID: 2, - // bae-176ebdf0-77e7-5b2f-91ae-f620e37a29e3 + // bae-9c689bec-071e-5650-9378-bc11d5d3325c Doc: `{ "name": "Manning Early Access Program (MEAP)", "address": "Online" @@ -53,13 +53,13 @@ func TestRelationalDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_Author(docID: "bae-2f80f359-535d-508e-ba58-088a309ce3c3") { + delete_Author(docID: "bae-455081f4-b810-5363-ab95-50dbd2ec03d0") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-2f80f359-535d-508e-ba58-088a309ce3c3", + "_docID": "bae-455081f4-b810-5363-ab95-50dbd2ec03d0", }, }, }, @@ -72,28 +72,28 @@ func TestRelationalDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { testUtils.CreateDoc{ // Books CollectionID: 0, - // bae-80eded16-ee4b-5c9d-b33f-6a7b83958af2 + // bae-8e8b2923-e167-5fd9-aee6-98267dd0ab40 Doc: `{ "name": "100 Go Mistakes to Avoid.", "rating": 4.8, - "publisher_id": "bae-176ebdf0-77e7-5b2f-91ae-f620e37a29e3" + "publisher_id": "bae-9c689bec-071e-5650-9378-bc11d5d3325c" }`, }, testUtils.CreateDoc{ // Authors CollectionID: 1, - // bae-2f80f359-535d-508e-ba58-088a309ce3c3 + // bae-455081f4-b810-5363-ab95-50dbd2ec03d0 Doc: `{ "name": "Teiva Harsanyi", "age": 48, "verified": true, - "wrote_id": "bae-80eded16-ee4b-5c9d-b33f-6a7b83958af2" + "wrote_id": "bae-8e8b2923-e167-5fd9-aee6-98267dd0ab40" }`, }, testUtils.CreateDoc{ // Publishers CollectionID: 2, - // bae-176ebdf0-77e7-5b2f-91ae-f620e37a29e3 + // bae-9c689bec-071e-5650-9378-bc11d5d3325c Doc: `{ "name": "Manning Early Access Program (MEAP)", "address": "Online" @@ -101,13 +101,13 @@ func TestRelationalDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_Author(docID: "bae-2f80f359-535d-508e-ba58-088a309ce3c3") { + delete_Author(docID: 
"bae-455081f4-b810-5363-ab95-50dbd2ec03d0") { AliasOfKey: _docID } }`, Results: []map[string]any{ { - "AliasOfKey": "bae-2f80f359-535d-508e-ba58-088a309ce3c3", + "AliasOfKey": "bae-455081f4-b810-5363-ab95-50dbd2ec03d0", }, }, }, @@ -120,28 +120,28 @@ func TestRelationalDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { testUtils.CreateDoc{ // Books CollectionID: 0, - // bae-80eded16-ee4b-5c9d-b33f-6a7b83958af2 + // bae-8e8b2923-e167-5fd9-aee6-98267dd0ab40 Doc: `{ "name": "100 Go Mistakes to Avoid.", "rating": 4.8, - "publisher_id": "bae-176ebdf0-77e7-5b2f-91ae-f620e37a29e3" + "publisher_id": "bae-9c689bec-071e-5650-9378-bc11d5d3325c" }`, }, testUtils.CreateDoc{ // Authors CollectionID: 1, - // bae-2f80f359-535d-508e-ba58-088a309ce3c3 + // bae-455081f4-b810-5363-ab95-50dbd2ec03d0 Doc: `{ "name": "Teiva Harsanyi", "age": 48, "verified": true, - "wrote_id": "bae-80eded16-ee4b-5c9d-b33f-6a7b83958af2" + "wrote_id": "bae-8e8b2923-e167-5fd9-aee6-98267dd0ab40" }`, }, testUtils.CreateDoc{ // Publishers CollectionID: 2, - // bae-176ebdf0-77e7-5b2f-91ae-f620e37a29e3 + // bae-9c689bec-071e-5650-9378-bc11d5d3325c Doc: `{ "name": "Manning Early Access Program (MEAP)", "address": "Online" @@ -166,13 +166,13 @@ func TestRelationalDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_Author(docID: "bae-2f80f359-535d-508e-ba58-088a309ce3c3") { + delete_Author(docID: "bae-455081f4-b810-5363-ab95-50dbd2ec03d0") { Key: _docID } }`, Results: []map[string]any{ { - "Key": "bae-2f80f359-535d-508e-ba58-088a309ce3c3", + "Key": "bae-455081f4-b810-5363-ab95-50dbd2ec03d0", }, }, }, diff --git a/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_txn_test.go b/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_txn_test.go index 4eed71eeb4..9298f26198 100644 --- a/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_txn_test.go +++ 
b/tests/integration/mutation/delete/field_kinds/one_to_one_to_one/with_txn_test.go @@ -25,17 +25,17 @@ func TestTxnDeletionOfRelatedDocFromPrimarySideForwardDirection(t *testing.T) { testUtils.CreateDoc{ // books CollectionID: 0, - // "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + // "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", Doc: `{ "name": "Book By Website", "rating": 4.0, - "publisher_id": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4" + "publisher_id": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed" }`, }, testUtils.CreateDoc{ // publishers CollectionID: 2, - // "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + // "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", Doc: `{ "name": "Website", "address": "Manning Publications" @@ -45,13 +45,13 @@ func TestTxnDeletionOfRelatedDocFromPrimarySideForwardDirection(t *testing.T) { // Delete a linked book that exists. TransactionID: immutable.Some(0), Request: `mutation { - delete_Book(docID: "bae-37de3681-1856-5bc9-9fd6-1595647b7d96") { + delete_Book(docID: "bae-e7943028-5c74-5fd4-9661-0a233edcd287") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", }, }, }, @@ -72,7 +72,7 @@ func TestTxnDeletionOfRelatedDocFromPrimarySideForwardDirection(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", "published": nil, }, @@ -91,17 +91,17 @@ func TestTxnDeletionOfRelatedDocFromPrimarySideBackwardDirection(t *testing.T) { testUtils.CreateDoc{ // books CollectionID: 0, - // "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + // "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", Doc: `{ "name": "Book By Website", "rating": 4.0, - "publisher_id": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4" + "publisher_id": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed" }`, }, testUtils.CreateDoc{ // 
publishers CollectionID: 2, - // "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + // "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", Doc: `{ "name": "Website", "address": "Manning Publications" @@ -111,13 +111,13 @@ func TestTxnDeletionOfRelatedDocFromPrimarySideBackwardDirection(t *testing.T) { // Delete a linked book that exists. TransactionID: immutable.Some(0), Request: `mutation { - delete_Book(docID: "bae-37de3681-1856-5bc9-9fd6-1595647b7d96") { + delete_Book(docID: "bae-e7943028-5c74-5fd4-9661-0a233edcd287") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", }, }, }, @@ -151,17 +151,17 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnForwardDirection(t *tes testUtils.CreateDoc{ // books CollectionID: 0, - // "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + // "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", Doc: `{ "name": "Book By Website", "rating": 4.0, - "publisher_id": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4" + "publisher_id": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed" }`, }, testUtils.CreateDoc{ // publishers CollectionID: 2, - // "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + // "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", Doc: `{ "name": "Website", "address": "Manning Publications" @@ -171,13 +171,13 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnForwardDirection(t *tes // Delete a linked book that exists. 
TransactionID: immutable.Some(0), Request: `mutation { - delete_Book(docID: "bae-37de3681-1856-5bc9-9fd6-1595647b7d96") { + delete_Book(docID: "bae-e7943028-5c74-5fd4-9661-0a233edcd287") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", }, }, }, @@ -196,10 +196,10 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnForwardDirection(t *tes }`, Results: []map[string]any{ { - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", "published": map[string]any{ - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", "name": "Book By Website", }, }, @@ -222,7 +222,7 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnForwardDirection(t *tes }`, Results: []map[string]any{ { - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", "published": nil, }, @@ -241,17 +241,17 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnBackwardDirection(t *te testUtils.CreateDoc{ // books CollectionID: 0, - // "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + // "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", Doc: `{ "name": "Book By Website", "rating": 4.0, - "publisher_id": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4" + "publisher_id": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed" }`, }, testUtils.CreateDoc{ // publishers CollectionID: 2, - // "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + // "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", Doc: `{ "name": "Website", "address": "Manning Publications" @@ -261,13 +261,13 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnBackwardDirection(t *te // Delete a linked book that exists in transaction 0. 
TransactionID: immutable.Some(0), Request: `mutation { - delete_Book(docID: "bae-37de3681-1856-5bc9-9fd6-1595647b7d96") { + delete_Book(docID: "bae-e7943028-5c74-5fd4-9661-0a233edcd287") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", }, }, }, @@ -286,10 +286,10 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnBackwardDirection(t *te }`, Results: []map[string]any{ { - "_docID": "bae-37de3681-1856-5bc9-9fd6-1595647b7d96", + "_docID": "bae-e7943028-5c74-5fd4-9661-0a233edcd287", "name": "Book By Website", "publisher": map[string]any{ - "_docID": "bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4", + "_docID": "bae-07fd000a-d023-54b9-b8f3-a4318fac8fed", "name": "Website", }, }, @@ -325,17 +325,17 @@ func TestTxnDeletionOfRelatedDocFromNonPrimarySideForwardDirection(t *testing.T) testUtils.CreateDoc{ // books CollectionID: 0, - // "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + // "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", Doc: `{ "name": "Book By Online", "rating": 4.0, - "publisher_id": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523" + "publisher_id": "bae-21084f46-b12a-53ab-94dd-04d075b4218c" }`, }, testUtils.CreateDoc{ // publishers CollectionID: 2, - // "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + // "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", Doc: `{ "name": "Online", "address": "Manning Early Access Program (MEAP)" @@ -346,13 +346,13 @@ func TestTxnDeletionOfRelatedDocFromNonPrimarySideForwardDirection(t *testing.T) // book gets correctly unlinked too. 
TransactionID: immutable.Some(0), Request: `mutation { - delete_Publisher(docID: "bae-8a381044-9206-51e7-8bc8-dc683d5f2523") { + delete_Publisher(docID: "bae-21084f46-b12a-53ab-94dd-04d075b4218c") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", }, }, }, @@ -386,17 +386,17 @@ func TestTxnDeletionOfRelatedDocFromNonPrimarySideBackwardDirection(t *testing.T testUtils.CreateDoc{ // books CollectionID: 0, - // "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + // "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", Doc: `{ "name": "Book By Online", "rating": 4.0, - "publisher_id": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523" + "publisher_id": "bae-21084f46-b12a-53ab-94dd-04d075b4218c" }`, }, testUtils.CreateDoc{ // publishers CollectionID: 2, - // "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + // "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", Doc: `{ "name": "Online", "address": "Manning Early Access Program (MEAP)" @@ -407,13 +407,13 @@ func TestTxnDeletionOfRelatedDocFromNonPrimarySideBackwardDirection(t *testing.T // book gets correctly unlinked too. 
TransactionID: immutable.Some(0), Request: `mutation { - delete_Publisher(docID: "bae-8a381044-9206-51e7-8bc8-dc683d5f2523") { + delete_Publisher(docID: "bae-21084f46-b12a-53ab-94dd-04d075b4218c") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-8a381044-9206-51e7-8bc8-dc683d5f2523", + "_docID": "bae-21084f46-b12a-53ab-94dd-04d075b4218c", }, }, }, @@ -434,7 +434,7 @@ func TestTxnDeletionOfRelatedDocFromNonPrimarySideBackwardDirection(t *testing.T }`, Results: []map[string]any{ { - "_docID": "bae-60ffc9b4-0e31-5d63-82dc-c5cb007f2985", + "_docID": "bae-7f6a5a76-b90d-5715-a452-708ded9e7ae7", "name": "Book By Online", "publisher": nil, }, diff --git a/tests/integration/mutation/delete/with_deleted_field_test.go b/tests/integration/mutation/delete/with_deleted_field_test.go index 0067b8b12c..274d224253 100644 --- a/tests/integration/mutation/delete/with_deleted_field_test.go +++ b/tests/integration/mutation/delete/with_deleted_field_test.go @@ -34,7 +34,7 @@ func TestMutationDeletion_WithDeletedField(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad") { + delete_User(docID: "bae-1ef746f8-821e-586f-99b2-4cb1fb9b782f") { _deleted _docID } @@ -42,7 +42,7 @@ func TestMutationDeletion_WithDeletedField(t *testing.T) { Results: []map[string]any{ { "_deleted": true, - "_docID": "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", + "_docID": "bae-1ef746f8-821e-586f-99b2-4cb1fb9b782f", }, }, }, diff --git a/tests/integration/mutation/delete/with_filter_test.go b/tests/integration/mutation/delete/with_filter_test.go index 79bf04753d..1884d38e17 100644 --- a/tests/integration/mutation/delete/with_filter_test.go +++ b/tests/integration/mutation/delete/with_filter_test.go @@ -135,13 +135,13 @@ func TestMutationDeletion_WithEmptyFilter(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Fred", + "name": "John", }, { "name": "Shahzad", }, { - "name": "John", + "name": "Fred", }, }, }, diff --git 
a/tests/integration/mutation/delete/with_id_alias_test.go b/tests/integration/mutation/delete/with_id_alias_test.go index 5709b7cadc..c9d259ac8b 100644 --- a/tests/integration/mutation/delete/with_id_alias_test.go +++ b/tests/integration/mutation/delete/with_id_alias_test.go @@ -34,13 +34,13 @@ func TestMutationDeletion_WithIDAndAlias(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-d7546ac1-c133-5853-b866-9b9f926fe7e5"]) { + delete_User(docIDs: ["bae-22dacd35-4560-583a-9a80-8edbf28aa85c"]) { fancyKey: _docID } }`, Results: []map[string]any{ { - "fancyKey": "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5", + "fancyKey": "bae-22dacd35-4560-583a-9a80-8edbf28aa85c", }, }, }, diff --git a/tests/integration/mutation/delete/with_id_test.go b/tests/integration/mutation/delete/with_id_test.go index 78c923693e..774e9a8c18 100644 --- a/tests/integration/mutation/delete/with_id_test.go +++ b/tests/integration/mutation/delete/with_id_test.go @@ -29,7 +29,7 @@ func TestMutationDeletion_WithIDUnknownValue(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-d7546ac1-c133-5853-b866-9b9f926fe7e5"]) { + delete_User(docIDs: ["bae-22dacd35-4560-583a-9a80-8edbf28aa85c"]) { _docID } }`, @@ -59,7 +59,7 @@ func TestMutationDeletion_WithIDUnknownValueAndUnrelatedRecordInCollection(t *te }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-d7546ac1-c133-5853-b866-9b9f926fe7e5"]) { + delete_User(docIDs: ["bae-22dacd35-4560-583a-9a80-8edbf28aa85c"]) { _docID } }`, diff --git a/tests/integration/mutation/delete/with_id_txn_test.go b/tests/integration/mutation/delete/with_id_txn_test.go index aeb6a4772b..f704acea56 100644 --- a/tests/integration/mutation/delete/with_id_txn_test.go +++ b/tests/integration/mutation/delete/with_id_txn_test.go @@ -37,13 +37,13 @@ func TestMutationDeletion_WithIDAndTxn(t *testing.T) { testUtils.Request{ TransactionID: immutable.Some(0), Request: `mutation { - delete_User(docIDs: 
["bae-d7546ac1-c133-5853-b866-9b9f926fe7e5"]) { + delete_User(docIDs: ["bae-22dacd35-4560-583a-9a80-8edbf28aa85c"]) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5", + "_docID": "bae-22dacd35-4560-583a-9a80-8edbf28aa85c", }, }, }, diff --git a/tests/integration/mutation/delete/with_ids_alias_test.go b/tests/integration/mutation/delete/with_ids_alias_test.go index e91432e787..b815ec703d 100644 --- a/tests/integration/mutation/delete/with_ids_alias_test.go +++ b/tests/integration/mutation/delete/with_ids_alias_test.go @@ -48,16 +48,16 @@ func TestMutationDeletion_WithIDsAndSelectAlias(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d", "bae-3a1a496e-24eb-5ae3-9c17-524c146a393e"]) { + delete_User(docIDs: ["bae-959725a4-17cb-5e04-8908-98bc78fd06dd", "bae-3eed37ed-5c7b-53ff-b125-d04fb173f6c0"]) { AliasID: _docID } }`, Results: []map[string]any{ { - "AliasID": "bae-3a1a496e-24eb-5ae3-9c17-524c146a393e", + "AliasID": "bae-3eed37ed-5c7b-53ff-b125-d04fb173f6c0", }, { - "AliasID": "bae-6a6482a8-24e1-5c73-a237-ca569e41507d", + "AliasID": "bae-959725a4-17cb-5e04-8908-98bc78fd06dd", }, }, }, diff --git a/tests/integration/mutation/delete/with_ids_filter_test.go b/tests/integration/mutation/delete/with_ids_filter_test.go index 02ddb656f7..41f79fa859 100644 --- a/tests/integration/mutation/delete/with_ids_filter_test.go +++ b/tests/integration/mutation/delete/with_ids_filter_test.go @@ -34,13 +34,13 @@ func TestMutationDeletion_WithIDsAndEmptyFilter(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-d7546ac1-c133-5853-b866-9b9f926fe7e5"], filter: {}) { + delete_User(docIDs: ["bae-22dacd35-4560-583a-9a80-8edbf28aa85c"], filter: {}) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5", + "_docID": "bae-22dacd35-4560-583a-9a80-8edbf28aa85c", }, }, }, diff --git 
a/tests/integration/mutation/delete/with_ids_test.go b/tests/integration/mutation/delete/with_ids_test.go index 18371c2d70..4f3e462b00 100644 --- a/tests/integration/mutation/delete/with_ids_test.go +++ b/tests/integration/mutation/delete/with_ids_test.go @@ -39,16 +39,16 @@ func TestMutationDeletion_WithIDs(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-d7546ac1-c133-5853-b866-9b9f926fe7e5", "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad"]) { + delete_User(docIDs: ["bae-22dacd35-4560-583a-9a80-8edbf28aa85c", "bae-1ef746f8-821e-586f-99b2-4cb1fb9b782f"]) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5", + "_docID": "bae-1ef746f8-821e-586f-99b2-4cb1fb9b782f", }, { - "_docID": "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", + "_docID": "bae-22dacd35-4560-583a-9a80-8edbf28aa85c", }, }, }, @@ -96,10 +96,10 @@ func TestMutationDeletion_WithEmptyIDs(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Shahzad", + "name": "John", }, { - "name": "John", + "name": "Shahzad", }, }, }, @@ -177,13 +177,13 @@ func TestMutationDeletion_WithIDsKnownAndUnknown(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-d7546ac1-c133-5853-b866-9b9f926fe7e5", "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad"]) { + delete_User(docIDs: ["bae-22dacd35-4560-583a-9a80-8edbf28aa85c", "bae-1ef746f8-821e-586f-99b2-4cb1fb9b782f"]) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5", + "_docID": "bae-22dacd35-4560-583a-9a80-8edbf28aa85c", }, }, }, diff --git a/tests/integration/mutation/delete/with_ids_txn_test.go b/tests/integration/mutation/delete/with_ids_txn_test.go index c59ec5c262..53cb44c9e9 100644 --- a/tests/integration/mutation/delete/with_ids_txn_test.go +++ b/tests/integration/mutation/delete/with_ids_txn_test.go @@ -43,20 +43,20 @@ func TestMutationDeletion_WithIDsAndTxn(t *testing.T) { testUtils.Request{ TransactionID: 
immutable.Some(0), Request: `mutation { - delete_User(docIDs: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d"]) { + delete_User(docIDs: ["bae-959725a4-17cb-5e04-8908-98bc78fd06dd"]) { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-6a6482a8-24e1-5c73-a237-ca569e41507d", + "_docID": "bae-959725a4-17cb-5e04-8908-98bc78fd06dd", }, }, }, testUtils.Request{ TransactionID: immutable.Some(0), Request: `query { - User(docIDs: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d"]) { + User(docIDs: ["bae-959725a4-17cb-5e04-8908-98bc78fd06dd"]) { _docID } }`, diff --git a/tests/integration/mutation/delete/with_ids_update_alias_test.go b/tests/integration/mutation/delete/with_ids_update_alias_test.go index 076f4f9967..551b52a175 100644 --- a/tests/integration/mutation/delete/with_ids_update_alias_test.go +++ b/tests/integration/mutation/delete/with_ids_update_alias_test.go @@ -56,16 +56,16 @@ func TestMutationDeletion_WithUpdateAndIDsAndSelectAlias(t *testing.T) { }, testUtils.Request{ Request: `mutation { - delete_User(docIDs: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d", "bae-3a1a496e-24eb-5ae3-9c17-524c146a393e"]) { + delete_User(docIDs: ["bae-959725a4-17cb-5e04-8908-98bc78fd06dd", "bae-3eed37ed-5c7b-53ff-b125-d04fb173f6c0"]) { AliasID: _docID } }`, Results: []map[string]any{ { - "AliasID": "bae-3a1a496e-24eb-5ae3-9c17-524c146a393e", + "AliasID": "bae-3eed37ed-5c7b-53ff-b125-d04fb173f6c0", }, { - "AliasID": "bae-6a6482a8-24e1-5c73-a237-ca569e41507d", + "AliasID": "bae-959725a4-17cb-5e04-8908-98bc78fd06dd", }, }, }, diff --git a/tests/integration/mutation/mix/with_txn_test.go b/tests/integration/mutation/mix/with_txn_test.go index de45e22fd4..b7c193b10b 100644 --- a/tests/integration/mutation/mix/with_txn_test.go +++ b/tests/integration/mutation/mix/with_txn_test.go @@ -39,20 +39,20 @@ func TestMutationWithTxnDeletesUserGivenSameTransaction(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": 
"bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", }, }, }, testUtils.Request{ TransactionID: immutable.Some(0), Request: `mutation { - delete_User(docID: "bae-88b63198-7d38-5714-a9ff-21ba46374fd1") { + delete_User(docID: "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9") { _docID } }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", }, }, }, @@ -83,14 +83,14 @@ func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing. }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", }, }, }, testUtils.Request{ TransactionID: immutable.Some(1), Request: `mutation { - delete_User(docID: "bae-88b63198-7d38-5714-a9ff-21ba46374fd1") { + delete_User(docID: "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9") { _docID } }`, @@ -107,7 +107,7 @@ func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing. }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", "name": "John", "age": int64(27), }, @@ -157,7 +157,7 @@ func TestMutationWithTxnDoesUpdateUserGivenSameTransactions(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", }, }, }, @@ -172,7 +172,7 @@ func TestMutationWithTxnDoesUpdateUserGivenSameTransactions(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", "name": "John", "age": int64(28), }, @@ -213,7 +213,7 @@ func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", "name": "John", "age": int64(28), }, @@ 
-230,7 +230,7 @@ func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", "name": "John", "age": int64(27), }, @@ -272,7 +272,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", "name": "John", "age": int64(28), }, @@ -289,7 +289,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", "name": "John", "age": int64(29), }, @@ -313,7 +313,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }`, Results: []map[string]any{ { - "_docID": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "_docID": "bae-948fc3eb-9b68-5a8d-9c3c-8f76157002a9", "name": "John", "age": int64(28), }, diff --git a/tests/integration/mutation/update/field_kinds/one_to_many/simple_test.go b/tests/integration/mutation/update/field_kinds/one_to_many/simple_test.go index dda55ffcfa..1b515c62fd 100644 --- a/tests/integration/mutation/update/field_kinds/one_to_many/simple_test.go +++ b/tests/integration/mutation/update/field_kinds/one_to_many/simple_test.go @@ -20,7 +20,7 @@ import ( ) func TestMutationUpdateOneToMany_RelationIDToLinkFromSingleSide_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" test := testUtils.TestCase{ @@ -75,7 +75,7 @@ func TestMutationUpdateOneToMany_RelationIDToLinkFromSingleSide_Error(t *testing // Note: This test should probably not pass, as it contains a // reference to a document that doesnt exist. 
func TestMutationUpdateOneToMany_InvalidRelationIDToLinkFromManySide(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" invalidAuthorID := "bae-35953ca-518d-9e6b-9ce6cd00eff5" test := testUtils.TestCase{ @@ -146,8 +146,8 @@ func TestMutationUpdateOneToMany_InvalidRelationIDToLinkFromManySide(t *testing. } func TestMutationUpdateOneToMany_RelationIDToLinkFromManySideWithWrongField_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" + author2ID := "bae-789d10d4-e54f-531b-ae81-e15100f8e506" test := testUtils.TestCase{ Description: "One to many update mutation using relation id from many side, with a wrong field.", @@ -199,8 +199,8 @@ func TestMutationUpdateOneToMany_RelationIDToLinkFromManySideWithWrongField_Erro } func TestMutationUpdateOneToMany_RelationIDToLinkFromManySide(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" + author2ID := "bae-789d10d4-e54f-531b-ae81-e15100f8e506" test := testUtils.TestCase{ Description: "One to many update mutation using relation id from many side", @@ -247,10 +247,6 @@ func TestMutationUpdateOneToMany_RelationIDToLinkFromManySide(t *testing.T) { } }`, Results: []map[string]any{ - { - "name": "John Grisham", - "published": []map[string]any{}, - }, { "name": "New Shahzad", "published": []map[string]any{ @@ -259,6 +255,10 @@ func TestMutationUpdateOneToMany_RelationIDToLinkFromManySide(t *testing.T) { }, }, }, + { + "name": "John Grisham", + "published": []map[string]any{}, + }, }, }, testUtils.Request{ diff --git a/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go b/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go index 
d3df327de2..bc0af76015 100644 --- a/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go +++ b/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go @@ -20,7 +20,7 @@ import ( ) func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_Collection(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" test := testUtils.TestCase{ @@ -74,7 +74,7 @@ func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_Collectio } func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_GQL(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" test := testUtils.TestCase{ @@ -129,7 +129,7 @@ func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_GQL(t *te // Note: This test should probably not pass, as it contains a // reference to a document that doesnt exist. func TestMutationUpdateOneToMany_InvalidAliasRelationNameToLinkFromManySide_GQL(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" invalidAuthorID := "bae-35953ca-518d-9e6b-9ce6cd00eff5" test := testUtils.TestCase{ @@ -202,7 +202,7 @@ func TestMutationUpdateOneToMany_InvalidAliasRelationNameToLinkFromManySide_GQL( // Note: This test should probably not pass, as it contains a // reference to a document that doesnt exist. 
func TestMutationUpdateOneToMany_InvalidAliasRelationNameToLinkFromManySide_Collection(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" invalidAuthorID := "bae-35953ca-518d-9e6b-9ce6cd00eff5" test := testUtils.TestCase{ @@ -257,8 +257,8 @@ func TestMutationUpdateOneToMany_InvalidAliasRelationNameToLinkFromManySide_Coll } func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromManySideWithWrongField_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" + author2ID := "bae-789d10d4-e54f-531b-ae81-e15100f8e506" test := testUtils.TestCase{ Description: "One to many update mutation using relation alias name from many side, with a wrong field.", @@ -310,8 +310,8 @@ func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromManySideWithWrongFie } func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromManySide(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" + author2ID := "bae-789d10d4-e54f-531b-ae81-e15100f8e506" test := testUtils.TestCase{ Description: "One to many update mutation using relation alias name from many side", @@ -358,10 +358,6 @@ func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromManySide(t *testing. } }`, Results: []map[string]any{ - { - "name": "John Grisham", - "published": []map[string]any{}, - }, { "name": "New Shahzad", "published": []map[string]any{ @@ -370,6 +366,10 @@ func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromManySide(t *testing. 
}, }, }, + { + "name": "John Grisham", + "published": []map[string]any{}, + }, }, }, testUtils.Request{ diff --git a/tests/integration/mutation/update/field_kinds/one_to_one/with_alias_test.go b/tests/integration/mutation/update/field_kinds/one_to_one/with_alias_test.go index 39e132a6c6..5996dde5ea 100644 --- a/tests/integration/mutation/update/field_kinds/one_to_one/with_alias_test.go +++ b/tests/integration/mutation/update/field_kinds/one_to_one/with_alias_test.go @@ -20,8 +20,8 @@ import ( ) func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromPrimarySide(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" + bookID := "bae-dfce6a1a-27fa-5dde-bea7-44df2dffac1a" test := testUtils.TestCase{ Description: "One to one update mutation using alias relation id from single side", @@ -66,8 +66,8 @@ func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromPrimarySide(t *testin } func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromSecondarySide(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" + author2ID := "bae-a34d8759-e549-5083-8ba6-e04038c41caa" test := testUtils.TestCase{ Description: "One to one update mutation using alias relation id from secondary side", @@ -112,7 +112,7 @@ func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromSecondarySide(t *test } func TestMutationUpdateOneToOne_AliasWithInvalidLengthRelationIDToLink_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" invalidLenSubID := "35953ca-518d-9e6b-9ce6cd00eff5" invalidAuthorID := "bae-" + invalidLenSubID @@ -153,7 +153,7 @@ func TestMutationUpdateOneToOne_AliasWithInvalidLengthRelationIDToLink_Error(t * } func 
TestMutationUpdateOneToOne_InvalidAliasRelationNameToLinkFromSecondarySide_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" invalidAuthorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ee" test := testUtils.TestCase{ @@ -193,8 +193,8 @@ func TestMutationUpdateOneToOne_InvalidAliasRelationNameToLinkFromSecondarySide_ } func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromSecondarySideWithWrongField_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" + author2ID := "bae-a34d8759-e549-5083-8ba6-e04038c41caa" test := testUtils.TestCase{ Description: "One to one update mutation using relation alias name from secondary side, with a wrong field.", diff --git a/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go b/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go index 16225f4ab3..dc49999627 100644 --- a/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go +++ b/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go @@ -18,7 +18,7 @@ import ( ) func TestMutationUpdateOneToOne_SelfReferencingFromPrimary(t *testing.T) { - user1ID := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + user1ID := "bae-ec56fb02-88fb-5113-b4d8-1b9be5f2217b" test := testUtils.TestCase{ Description: "One to one update mutation, self referencing from primary", @@ -104,7 +104,7 @@ func TestMutationUpdateOneToOne_SelfReferencingFromPrimary(t *testing.T) { } func TestMutationUpdateOneToOne_SelfReferencingFromSecondary(t *testing.T) { - user1ID := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + user1ID := "bae-12a7f594-f02e-53b7-81c4-aba27b2e7ea7" test := testUtils.TestCase{ Description: "One to one update mutation, self referencing from secondary", @@ -149,16 +149,16 @@ func 
TestMutationUpdateOneToOne_SelfReferencingFromSecondary(t *testing.T) { } }`, Results: []map[string]any{ + { + "name": "John", + "boss": nil, + }, { "name": "Fred", "boss": map[string]any{ "name": "John", }, }, - { - "name": "John", - "boss": nil, - }, }, }, testUtils.Request{ @@ -172,16 +172,16 @@ func TestMutationUpdateOneToOne_SelfReferencingFromSecondary(t *testing.T) { } }`, Results: []map[string]any{ - { - "name": "Fred", - "underling": nil, - }, { "name": "John", "underling": map[string]any{ "name": "Fred", }, }, + { + "name": "Fred", + "underling": nil, + }, }, }, }, diff --git a/tests/integration/mutation/update/field_kinds/one_to_one/with_simple_test.go b/tests/integration/mutation/update/field_kinds/one_to_one/with_simple_test.go index 0c05734204..c2df5a9db8 100644 --- a/tests/integration/mutation/update/field_kinds/one_to_one/with_simple_test.go +++ b/tests/integration/mutation/update/field_kinds/one_to_one/with_simple_test.go @@ -22,7 +22,7 @@ import ( // Note: This test should probably not pass, as it contains a // reference to a document that doesnt exist. 
func TestMutationUpdateOneToOneNoChild(t *testing.T) { - unknownID := "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + unknownID := "bae-be6d8024-4953-5a92-84b4-f042d25230c6" test := testUtils.TestCase{ Description: "One to one create mutation, from the wrong side", @@ -61,7 +61,7 @@ func TestMutationUpdateOneToOneNoChild(t *testing.T) { } func TestMutationUpdateOneToOne(t *testing.T) { - bookID := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + bookID := "bae-dafb74e9-2bf1-5f12-aea9-967814592bad" test := testUtils.TestCase{ Description: "One to one update mutation", @@ -133,7 +133,7 @@ func TestMutationUpdateOneToOne(t *testing.T) { } func TestMutationUpdateOneToOneSecondarySide(t *testing.T) { - authorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + authorID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" test := testUtils.TestCase{ Description: "One to one create mutation, from the secondary side", @@ -204,8 +204,8 @@ func TestMutationUpdateOneToOneSecondarySide(t *testing.T) { } func TestMutationUpdateOneToOne_RelationIDToLinkFromPrimarySide(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" + bookID := "bae-dfce6a1a-27fa-5dde-bea7-44df2dffac1a" test := testUtils.TestCase{ Description: "One to one update mutation using relation id from single side (wrong)", @@ -250,8 +250,8 @@ func TestMutationUpdateOneToOne_RelationIDToLinkFromPrimarySide(t *testing.T) { } func TestMutationUpdateOneToOne_RelationIDToLinkFromSecondarySide(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" + author2ID := "bae-a34d8759-e549-5083-8ba6-e04038c41caa" test := testUtils.TestCase{ Description: "One to one update mutation using relation id from secondary side", @@ -296,7 +296,7 @@ func 
TestMutationUpdateOneToOne_RelationIDToLinkFromSecondarySide(t *testing.T) } func TestMutationUpdateOneToOne_InvalidLengthRelationIDToLink_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" invalidLenSubID := "35953ca-518d-9e6b-9ce6cd00eff5" invalidAuthorID := "bae-" + invalidLenSubID @@ -337,7 +337,7 @@ func TestMutationUpdateOneToOne_InvalidLengthRelationIDToLink_Error(t *testing.T } func TestMutationUpdateOneToOne_InvalidRelationIDToLinkFromSecondarySide_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" invalidAuthorID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ee" test := testUtils.TestCase{ @@ -377,8 +377,8 @@ func TestMutationUpdateOneToOne_InvalidRelationIDToLinkFromSecondarySide_Error(t } func TestMutationUpdateOneToOne_RelationIDToLinkFromSecondarySideWithWrongField_Error(t *testing.T) { - author1ID := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - author2ID := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + author1ID := "bae-42d197b8-d14f-5570-a55d-9e8714b2a82a" + author2ID := "bae-a34d8759-e549-5083-8ba6-e04038c41caa" test := testUtils.TestCase{ Description: "One to one update mutation using relation id from secondary side, with a wrong field.", diff --git a/tests/integration/mutation/update/with_id_test.go b/tests/integration/mutation/update/with_id_test.go index 899711a1ab..342e228052 100644 --- a/tests/integration/mutation/update/with_id_test.go +++ b/tests/integration/mutation/update/with_id_test.go @@ -29,7 +29,7 @@ func TestMutationUpdate_WithId(t *testing.T) { `, }, testUtils.CreateDoc{ - // bae-cc36febf-4029-52b3-a876-c99c6293f588 + // bae-0289c22a-aec7-5b59-adfc-60968698fcdf Doc: `{ "name": "John", "points": 42.1 @@ -43,7 +43,7 @@ func TestMutationUpdate_WithId(t *testing.T) { }, testUtils.Request{ Request: `mutation { - update_Users(docID: 
"bae-cc36febf-4029-52b3-a876-c99c6293f588", input: {points: 59}) { + update_Users(docID: "bae-0289c22a-aec7-5b59-adfc-60968698fcdf", input: {points: 59}) { name points } @@ -74,7 +74,7 @@ func TestMutationUpdate_WithNonExistantId(t *testing.T) { `, }, testUtils.CreateDoc{ - // bae-cc36febf-4029-52b3-a876-c99c6293f588 + // bae-0289c22a-aec7-5b59-adfc-60968698fcdf Doc: `{ "name": "John", "points": 42.1 diff --git a/tests/integration/mutation/update/with_ids_test.go b/tests/integration/mutation/update/with_ids_test.go index 59f4e7ac73..5b7108948f 100644 --- a/tests/integration/mutation/update/with_ids_test.go +++ b/tests/integration/mutation/update/with_ids_test.go @@ -29,7 +29,7 @@ func TestMutationUpdate_WithIds(t *testing.T) { `, }, testUtils.CreateDoc{ - // bae-cc36febf-4029-52b3-a876-c99c6293f588 + // bae-0289c22a-aec7-5b59-adfc-60968698fcdf Doc: `{ "name": "John", "points": 42.1 @@ -42,7 +42,7 @@ func TestMutationUpdate_WithIds(t *testing.T) { }`, }, testUtils.CreateDoc{ - // bae-3ac659d1-521a-5eba-a833-5c58b151ca72 + // bae-fcc8673d-25f9-5f24-a529-4bc997035278 Doc: `{ "name": "Fred", "points": 33 @@ -51,7 +51,7 @@ func TestMutationUpdate_WithIds(t *testing.T) { testUtils.Request{ Request: `mutation { update_Users( - docIDs: ["bae-cc36febf-4029-52b3-a876-c99c6293f588", "bae-3ac659d1-521a-5eba-a833-5c58b151ca72"], + docIDs: ["bae-0289c22a-aec7-5b59-adfc-60968698fcdf", "bae-fcc8673d-25f9-5f24-a529-4bc997035278"], input: {points: 59} ) { name @@ -60,11 +60,11 @@ func TestMutationUpdate_WithIds(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Fred", + "name": "John", "points": float64(59), }, { - "name": "John", + "name": "Fred", "points": float64(59), }, }, diff --git a/tests/integration/net/one_to_many/peer/with_create_update_test.go b/tests/integration/net/one_to_many/peer/with_create_update_test.go index a7451fc956..e6862754de 100644 --- a/tests/integration/net/one_to_many/peer/with_create_update_test.go +++ 
b/tests/integration/net/one_to_many/peer/with_create_update_test.go @@ -63,7 +63,7 @@ func TestP2POneToManyPeerWithCreateUpdateLinkingSyncedDocToUnsyncedDoc(t *testin CollectionID: 1, DocID: 0, Doc: `{ - "Author_id": "bae-cf278a29-5680-565d-9c7f-4c46d3700cf0" + "Author_id": "bae-6a4c24c0-7b0b-5f51-a274-132d7ca90499" }`, }, testUtils.WaitForSync{}, @@ -81,7 +81,7 @@ func TestP2POneToManyPeerWithCreateUpdateLinkingSyncedDocToUnsyncedDoc(t *testin Results: []map[string]any{ { "Name": "Gulistan", - "Author_id": "bae-cf278a29-5680-565d-9c7f-4c46d3700cf0", + "Author_id": "bae-6a4c24c0-7b0b-5f51-a274-132d7ca90499", "Author": map[string]any{ "Name": "Saadi", }, @@ -102,7 +102,7 @@ func TestP2POneToManyPeerWithCreateUpdateLinkingSyncedDocToUnsyncedDoc(t *testin Results: []map[string]any{ { "Name": "Gulistan", - "Author_id": "bae-cf278a29-5680-565d-9c7f-4c46d3700cf0", + "Author_id": "bae-6a4c24c0-7b0b-5f51-a274-132d7ca90499", // "Saadi" was not synced to node 1, the update did not // result in an error and synced to relational id even though "Saadi" // does not exist in this node. 
diff --git a/tests/integration/net/one_to_many/replicator/with_create_test.go b/tests/integration/net/one_to_many/replicator/with_create_test.go index 10e80a9a02..7d2d706805 100644 --- a/tests/integration/net/one_to_many/replicator/with_create_test.go +++ b/tests/integration/net/one_to_many/replicator/with_create_test.go @@ -54,7 +54,7 @@ func TestP2POneToManyReplicator(t *testing.T) { CollectionID: 1, Doc: `{ "Name": "Gulistan", - "Author_id": "bae-cf278a29-5680-565d-9c7f-4c46d3700cf0" + "Author_id": "bae-6a4c24c0-7b0b-5f51-a274-132d7ca90499" }`, }, testUtils.WaitForSync{}, diff --git a/tests/integration/net/simple/peer/with_create_test.go b/tests/integration/net/simple/peer/with_create_test.go index a6c095024c..e89030690d 100644 --- a/tests/integration/net/simple/peer/with_create_test.go +++ b/tests/integration/net/simple/peer/with_create_test.go @@ -59,10 +59,10 @@ func TestP2PCreateDoesNotSync(t *testing.T) { }`, Results: []map[string]any{ { - "Age": int64(21), + "Age": int64(300), }, { - "Age": int64(300), + "Age": int64(21), }, }, }, @@ -147,13 +147,13 @@ func TestP2PCreateWithP2PCollection(t *testing.T) { }`, Results: []map[string]any{ { - "Age": int64(21), + "Age": int64(28), }, { "Age": int64(30), }, { - "Age": int64(28), + "Age": int64(21), }, // Peer sync should not sync new documents to nodes that is not subscribed // to the P2P collection. 
@@ -168,16 +168,16 @@ func TestP2PCreateWithP2PCollection(t *testing.T) { }`, Results: []map[string]any{ { - "Age": int64(21), + "Age": int64(28), }, { - "Age": int64(31), + "Age": int64(30), }, { - "Age": int64(30), + "Age": int64(21), }, { - "Age": int64(28), + "Age": int64(31), }, }, }, diff --git a/tests/integration/net/simple/peer/with_delete_test.go b/tests/integration/net/simple/peer/with_delete_test.go index 49a0b98c41..b0b5fe3ded 100644 --- a/tests/integration/net/simple/peer/with_delete_test.go +++ b/tests/integration/net/simple/peer/with_delete_test.go @@ -123,16 +123,16 @@ func TestP2PWithMultipleDocumentsSingleDeleteWithShowDeleted(t *testing.T) { } }`, Results: []map[string]any{ - { - "_deleted": false, - "Name": "Andy", - "Age": int64(74), - }, { "_deleted": true, "Name": "John", "Age": int64(43), }, + { + "_deleted": false, + "Name": "Andy", + "Age": int64(74), + }, }, }, }, @@ -195,16 +195,16 @@ func TestP2PWithMultipleDocumentsWithSingleUpdateBeforeConnectSingleDeleteWithSh } }`, Results: []map[string]any{ - { - "_deleted": false, - "Name": "Andy", - "Age": int64(74), - }, { "_deleted": true, "Name": "John", "Age": int64(60), }, + { + "_deleted": false, + "Name": "Andy", + "Age": int64(74), + }, }, }, }, @@ -276,16 +276,16 @@ func TestP2PWithMultipleDocumentsWithMultipleUpdatesBeforeConnectSingleDeleteWit } }`, Results: []map[string]any{ - { - "_deleted": false, - "Name": "Andy", - "Age": int64(74), - }, { "_deleted": true, "Name": "John", "Age": int64(62), }, + { + "_deleted": false, + "Name": "Andy", + "Age": int64(74), + }, }, }, }, @@ -367,16 +367,16 @@ func TestP2PWithMultipleDocumentsWithUpdateAndDeleteBeforeConnectSingleDeleteWit } }`, Results: []map[string]any{ - { - "_deleted": false, - "Name": "Andy", - "Age": int64(74), - }, { "_deleted": true, "Name": "John", "Age": int64(62), }, + { + "_deleted": false, + "Name": "Andy", + "Age": int64(74), + }, }, }, // The target node currently won't receive the pre-connection updates from the 
source. @@ -393,13 +393,13 @@ func TestP2PWithMultipleDocumentsWithUpdateAndDeleteBeforeConnectSingleDeleteWit Results: []map[string]any{ { "_deleted": false, - "Name": "Andy", - "Age": int64(74), + "Name": "John", + "Age": int64(66), }, { "_deleted": false, - "Name": "John", - "Age": int64(66), + "Name": "Andy", + "Age": int64(74), }, }, }, diff --git a/tests/integration/net/simple/peer_replicator/with_create_test.go b/tests/integration/net/simple/peer_replicator/with_create_test.go index c7b1bf0e8e..3b1f299cfa 100644 --- a/tests/integration/net/simple/peer_replicator/with_create_test.go +++ b/tests/integration/net/simple/peer_replicator/with_create_test.go @@ -63,10 +63,10 @@ func TestP2PPeerReplicatorWithCreate(t *testing.T) { }`, Results: []map[string]any{ { - "Age": int64(21), + "Age": int64(3000), }, { - "Age": int64(3000), + "Age": int64(21), }, }, }, @@ -92,10 +92,10 @@ func TestP2PPeerReplicatorWithCreate(t *testing.T) { }`, Results: []map[string]any{ { - "Age": int64(21), + "Age": int64(3000), }, { - "Age": int64(3000), + "Age": int64(21), }, }, }, diff --git a/tests/integration/net/simple/replicator/with_create_test.go b/tests/integration/net/simple/replicator/with_create_test.go index 0d3dbad143..5785337b98 100644 --- a/tests/integration/net/simple/replicator/with_create_test.go +++ b/tests/integration/net/simple/replicator/with_create_test.go @@ -361,10 +361,10 @@ func TestP2POneToOneReplicatorManyDocs(t *testing.T) { }`, Results: []map[string]any{ { - "Age": int64(21), + "Age": int64(22), }, { - "Age": int64(22), + "Age": int64(21), }, }, }, @@ -421,10 +421,10 @@ func TestP2POneToManyReplicatorManyDocs(t *testing.T) { }`, Results: []map[string]any{ { - "Age": int64(21), + "Age": int64(22), }, { - "Age": int64(22), + "Age": int64(21), }, }, }, @@ -487,7 +487,7 @@ func TestP2POneToOneReplicatorOrderIndependent(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "_docID": 
"bae-0b2f15e5-bfe7-5cb7-8045-471318d7dbc3", "age": int64(21), "name": "John", "_version": []map[string]any{ @@ -549,7 +549,7 @@ func TestP2POneToOneReplicatorOrderIndependentDirectCreate(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "_docID": "bae-0b2f15e5-bfe7-5cb7-8045-471318d7dbc3", "_version": []map[string]any{ { "schemaVersionId": "bafkreihhd6bqrjhl5zidwztgxzeseveplv3cj3fwtn3unjkdx7j2vr2vrq", diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go index 4b2d037d61..bcd7ccc1c0 100644 --- a/tests/integration/query/commits/simple_test.go +++ b/tests/integration/query/commits/simple_test.go @@ -36,13 +36,13 @@ func TestQueryCommits(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, @@ -79,22 +79,22 @@ func TestQueryCommitsMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreidoznu3dbvgngnfk3xjjgpmrpjs3wrpbc7ntl3rbacspeee5kp53a", + "cid": "bafyreiazgtllwk7znzuapv3fsukzhpekqqjjvgv4fzypkfp7mljfabie3q", }, { - "cid": "bafyreia5q5oya6vnv2kvffzfnl23762zvtqxmatjd5s3ldpwxfdo3aey6i", + "cid": "bafyreicbr2jo7y4d6773q66kxvzq4k3jss2rw5ysr3co2mjdhcdyiz7buq", }, { - "cid": "bafyreibr7mv7b4kg4zym7fow6ljhl6kdzfgcvfi5q26ogz5gsev75ewxcq", + "cid": "bafyreihmvuytwy5ofcm5bqyazxwnquksutxvybznavmw23vddb7nooh6pq", }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, { - "cid": 
"bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, @@ -125,15 +125,15 @@ func TestQueryCommitsWithSchemaVersionIDField(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, @@ -349,74 +349,74 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { `, Results: []map[string]any{ { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "collectionID": int64(1), "delta": testUtils.CBORValue(22), - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", "fieldId": "1", "fieldName": "age", "height": int64(2), "links": []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "name": "_head", }, }, }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "collectionID": int64(1), "delta": 
testUtils.CBORValue(21), - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", "fieldId": "1", "fieldName": "age", "height": int64(1), "links": []map[string]any{}, }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "collectionID": int64(1), "delta": testUtils.CBORValue("John"), - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", "fieldId": "2", "fieldName": "name", "height": int64(1), "links": []map[string]any{}, }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "collectionID": int64(1), "delta": nil, - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", "fieldId": "C", "fieldName": nil, "height": int64(2), "links": []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "name": "_head", }, { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "name": "age", }, }, }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "collectionID": int64(1), "delta": nil, - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", "fieldId": "C", "fieldName": nil, "height": int64(1), "links": []map[string]any{ { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "name": "name", }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": 
"bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "name": "age", }, }, diff --git a/tests/integration/query/commits/with_cid_test.go b/tests/integration/query/commits/with_cid_test.go index 0c502ea7e2..6a85691e74 100644 --- a/tests/integration/query/commits/with_cid_test.go +++ b/tests/integration/query/commits/with_cid_test.go @@ -38,14 +38,14 @@ func TestQueryCommitsWithCid(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq" + cid: "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, @@ -71,14 +71,14 @@ func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq" + cid: "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, diff --git a/tests/integration/query/commits/with_collectionid_prop_test.go b/tests/integration/query/commits/with_collectionid_prop_test.go index 6de4a71e2e..91d4d60373 100644 --- a/tests/integration/query/commits/with_collectionid_prop_test.go +++ b/tests/integration/query/commits/with_collectionid_prop_test.go @@ -43,19 +43,19 @@ func TestQueryCommitsWithCollectionID(t *testing.T) { }`, Results: []map[string]any{ { - "collectionID": int64(2), + "collectionID": int64(1), }, { - "collectionID": int64(2), + "collectionID": int64(1), }, { "collectionID": int64(1), }, { - "collectionID": int64(1), + "collectionID": int64(2), }, { - "collectionID": int64(1), + "collectionID": int64(2), }, }, }, diff --git 
a/tests/integration/query/commits/with_depth_test.go b/tests/integration/query/commits/with_depth_test.go index a950e72b85..7660a04585 100644 --- a/tests/integration/query/commits/with_depth_test.go +++ b/tests/integration/query/commits/with_depth_test.go @@ -36,13 +36,13 @@ func TestQueryCommitsWithDepth1(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, @@ -81,16 +81,16 @@ func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { Results: []map[string]any{ { // "Age" field head - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "height": int64(1), }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, }, @@ -137,27 +137,27 @@ func TestQueryCommitsWithDepth2WithUpdate(t *testing.T) { Results: []map[string]any{ { // Composite head - "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", + "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", "height": int64(3), }, { // Composite head -1 - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "height": int64(2), }, { // "Name" field head 
(unchanged from create) - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "height": int64(1), }, { // "Age" field head - "cid": "bafyreifyknutg2lsajcsqrfegr65t5h5s743jkp3bfuzphx4nmqztfwmga", + "cid": "bafyreichbcwfiwampbb2w2stlrk5yryu5ao4ubz2utybv5fc6qioj3qhu4", "height": int64(3), }, { // "Age" field head -1 - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, }, @@ -195,22 +195,22 @@ func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreicab3zaizu5bwyfn25hy5zf6hsdx6un5kyixiktxkgwtyobdlr52e", + "cid": "bafyreibqku2yhbclue774qtkh2gmn3oeaw7gan7ervjyqwqsxfr2c7ymwu", }, { - "cid": "bafyreig3kdxtlbaohkcxx5bysmyrjvdggoeq47x6cyrbyfevi2hbgkq4sa", + "cid": "bafyreicrku377qakqjzzlh4phornkj36otic6oc27ano6mril5hu5fuulu", }, { - "cid": "bafyreifq22zqplhdxr2rvuanegqw6ogaur46f5ud3upp7p2dw7tt6vozpa", + "cid": "bafyreibqvl2lwj3icgbg7rconymk5pmnpzr6htvd37qw4fmawe7bxvpuse", }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_cid_test.go b/tests/integration/query/commits/with_doc_id_cid_test.go index 9e1972e391..493f07b477 100644 --- a/tests/integration/query/commits/with_doc_id_cid_test.go +++ b/tests/integration/query/commits/with_doc_id_cid_test.go @@ -103,15 +103,15 @@ func TestQueryCommitsWithDocIDAndCidWithUpdate(t *testing.T) { testUtils.Request{ Request: ` { 
commits( - docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", - cid: "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4" + docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", + cid: "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_count_test.go b/tests/integration/query/commits/with_doc_id_count_test.go index ef4dc103bf..11034ac09f 100644 --- a/tests/integration/query/commits/with_doc_id_count_test.go +++ b/tests/integration/query/commits/with_doc_id_count_test.go @@ -30,22 +30,22 @@ func TestQueryCommitsWithDocIDAndLinkCount(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { cid _count(field: links) } }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "_count": 0, }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "_count": 0, }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "_count": 2, }, }, diff --git a/tests/integration/query/commits/with_doc_id_field_test.go b/tests/integration/query/commits/with_doc_id_field_test.go index 4d0e838be7..702f4cc93b 100644 --- a/tests/integration/query/commits/with_doc_id_field_test.go +++ b/tests/integration/query/commits/with_doc_id_field_test.go @@ -30,7 +30,7 @@ func TestQueryCommitsWithDocIDAndUnknownField(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: 
"bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "not a field") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "not a field") { cid } }`, @@ -56,7 +56,7 @@ func TestQueryCommitsWithDocIDAndUnknownFieldId(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "999999") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "999999") { cid } }`, @@ -84,7 +84,7 @@ func TestQueryCommitsWithDocIDAndField(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "Age") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "Age") { cid } }`, @@ -112,13 +112,13 @@ func TestQueryCommitsWithDocIDAndFieldId(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "1") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "1") { cid } }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, }, }, @@ -144,13 +144,13 @@ func TestQueryCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "C") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "C") { cid } }`, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_group_order_test.go b/tests/integration/query/commits/with_doc_id_group_order_test.go index d858bcf819..5443b4e427 100644 --- a/tests/integration/query/commits/with_doc_id_group_order_test.go +++ 
b/tests/integration/query/commits/with_doc_id_group_order_test.go @@ -43,10 +43,10 @@ func TestQueryCommitsOrderedAndGroupedByDocID(t *testing.T) { }`, Results: []map[string]any{ { - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", }, { - "docID": "bae-72f3dc53-1846-55d5-915c-28c4e83cc891", + "docID": "bae-a839588e-e2e5-5ede-bb91-ffe6871645cb", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_limit_offset_test.go index fc2a51da20..42779293c0 100644 --- a/tests/integration/query/commits/with_doc_id_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_offset_test.go @@ -51,16 +51,16 @@ func TestQueryCommitsWithDocIDAndLimitAndOffset(t *testing.T) { }, testUtils.Request{ Request: ` { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", limit: 2, offset: 1) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", limit: 2, offset: 1) { cid } }`, Results: []map[string]any{ { - "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", + "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", }, { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_test.go b/tests/integration/query/commits/with_doc_id_limit_test.go index 78bdc100c4..6be85f5339 100644 --- a/tests/integration/query/commits/with_doc_id_limit_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_test.go @@ -44,16 +44,16 @@ func TestQueryCommitsWithDocIDAndLimit(t *testing.T) { }, testUtils.Request{ Request: ` { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", limit: 2) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", limit: 2) { cid } }`, Results: []map[string]any{ { - "cid": 
"bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", + "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", }, { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go index cc2663ba1b..c1685e7ad4 100644 --- a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go @@ -51,18 +51,18 @@ func TestQueryCommitsWithDocIDAndOrderAndLimitAndOffset(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", order: {height: ASC}, limit: 2, offset: 4) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", order: {height: ASC}, limit: 2, offset: 4) { cid height } }`, Results: []map[string]any{ { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, { - "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", + "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", "height": int64(3), }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_test.go b/tests/integration/query/commits/with_doc_id_order_test.go index 5de3f114b6..f7fb045a1f 100644 --- a/tests/integration/query/commits/with_doc_id_order_test.go +++ b/tests/integration/query/commits/with_doc_id_order_test.go @@ -37,30 +37,30 @@ func TestQueryCommitsWithDocIDAndOrderHeightDesc(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", order: {height: DESC}) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", order: {height: DESC}) { cid height } }`, Results: 
[]map[string]any{ { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "height": int64(2), }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "height": int64(1), }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "height": int64(1), }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "height": int64(1), }, }, @@ -92,30 +92,30 @@ func TestQueryCommitsWithDocIDAndOrderHeightAsc(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", order: {height: ASC}) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", order: {height: ASC}) { cid height } }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "height": int64(1), }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "height": int64(1), }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "height": int64(1), }, { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "height": int64(2), }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": 
"bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, }, @@ -147,31 +147,31 @@ func TestQueryCommitsWithDocIDAndOrderCidDesc(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", order: {cid: DESC}) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", order: {cid: DESC}) { cid height } }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", - "height": int64(1), + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "height": int64(2), }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "height": int64(1), }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "height": int64(1), }, { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", - "height": int64(2), + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "height": int64(1), }, }, }, @@ -202,31 +202,31 @@ func TestQueryCommitsWithDocIDAndOrderCidAsc(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", order: {cid: ASC}) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", order: {cid: ASC}) { cid height } }`, Results: []map[string]any{ { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", - "height": int64(2), + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "height": int64(1), }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", 
"height": int64(1), }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "height": int64(1), }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", - "height": int64(1), + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "height": int64(2), }, }, }, @@ -271,46 +271,46 @@ func TestQueryCommitsWithDocIDAndOrderAndMultiUpdatesCidAsc(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", order: {height: ASC}) { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", order: {height: ASC}) { cid height } }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "height": int64(1), }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "height": int64(1), }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "height": int64(1), }, { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "height": int64(2), }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, { - "cid": "bafyreial53rqep7uoheucc3rzvhs6dbhydnkbqe4w2bhd3hsybub4u3h6m", + "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", "height": int64(3), }, { - "cid": "bafyreifyknutg2lsajcsqrfegr65t5h5s743jkp3bfuzphx4nmqztfwmga", 
+ "cid": "bafyreichbcwfiwampbb2w2stlrk5yryu5ao4ubz2utybv5fc6qioj3qhu4", "height": int64(3), }, { - "cid": "bafyreiarqxac3gnfrfj3j6fukof375kp56i6jcvgmpfytenczsilx4xkey", + "cid": "bafyreiefwtmw7gtwjmvhapfpq2gmi6j772a6zx5uyyrys6ft4za4oljwfm", "height": int64(4), }, { - "cid": "bafyreihazmfdrwrk3udgnnkuxipcg7cjmzndfazif3t2vxyrq36qwfkrt4", + "cid": "bafyreidde6teqyfjruflxo3yy25rayu7yrxg54siqvloxzqt7o32g2wicy", "height": int64(4), }, }, diff --git a/tests/integration/query/commits/with_doc_id_prop_test.go b/tests/integration/query/commits/with_doc_id_prop_test.go index 6404114217..1d370ab27a 100644 --- a/tests/integration/query/commits/with_doc_id_prop_test.go +++ b/tests/integration/query/commits/with_doc_id_prop_test.go @@ -36,13 +36,13 @@ func TestQueryCommitsWithDocIDProperty(t *testing.T) { }`, Results: []map[string]any{ { - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", }, { - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", }, { - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_test.go b/tests/integration/query/commits/with_doc_id_test.go index 800dad5bfd..e082e6e790 100644 --- a/tests/integration/query/commits/with_doc_id_test.go +++ b/tests/integration/query/commits/with_doc_id_test.go @@ -56,19 +56,19 @@ func TestQueryCommitsWithDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { cid } }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": 
"bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, @@ -92,7 +92,7 @@ func TestQueryCommitsWithDocIDAndLinks(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { cid links { cid @@ -102,22 +102,22 @@ func TestQueryCommitsWithDocIDAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "links": []map[string]any{}, }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "links": []map[string]any{}, }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "links": []map[string]any{ { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "name": "name", }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "name": "age", }, }, @@ -151,30 +151,30 @@ func TestQueryCommitsWithDocIDAndUpdate(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { cid height } }`, Results: []map[string]any{ { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "height": int64(2), }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": 
"bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "height": int64(1), }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "height": int64(1), }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "height": int64(2), }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "height": int64(1), }, }, @@ -209,7 +209,7 @@ func TestQueryCommitsWithDocIDAndUpdateAndLinks(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { cid links { cid @@ -219,44 +219,44 @@ func TestQueryCommitsWithDocIDAndUpdateAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "links": []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "name": "_head", }, }, }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "links": []map[string]any{}, }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "links": []map[string]any{}, }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", "links": []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", 
"name": "_head", }, { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", "name": "age", }, }, }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "links": []map[string]any{ { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "name": "name", }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "name": "age", }, }, diff --git a/tests/integration/query/commits/with_doc_id_typename_test.go b/tests/integration/query/commits/with_doc_id_typename_test.go index d15ebba47e..77a58d2fdc 100644 --- a/tests/integration/query/commits/with_doc_id_typename_test.go +++ b/tests/integration/query/commits/with_doc_id_typename_test.go @@ -30,22 +30,22 @@ func TestQueryCommitsWithDocIDWithTypeName(t *testing.T) { }, testUtils.Request{ Request: `query { - commits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + commits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { cid __typename } }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "__typename": "Commit", }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "__typename": "Commit", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "__typename": "Commit", }, }, diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index 6d4922d9b8..15cc2a4cd9 100644 --- 
a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -66,7 +66,7 @@ func TestQueryCommitsWithFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, }, }, @@ -98,7 +98,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, @@ -131,7 +131,7 @@ func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionID(t *testing. }`, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, diff --git a/tests/integration/query/commits/with_group_test.go b/tests/integration/query/commits/with_group_test.go index f69476a648..fb958ad077 100644 --- a/tests/integration/query/commits/with_group_test.go +++ b/tests/integration/query/commits/with_group_test.go @@ -89,10 +89,10 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(2), "_group": []map[string]any{ { - "cid": "bafyreictnkwvit6jp4mwhai3xp75nvtacxpq6zgbbjm55ylae3t6qshrze", + "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", }, { - "cid": "bafyreif3pvxatyqbmwllb7mcxvs734fgfmdkavbu6ambhay37w6vxjzkx4", + "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", }, }, }, @@ -100,13 +100,13 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(1), "_group": []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", }, { - "cid": 
"bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", }, }, }, @@ -142,7 +142,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "_group": []map[string]any{ { "height": int64(1), @@ -150,7 +150,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "_group": []map[string]any{ { "height": int64(1), @@ -158,7 +158,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "_group": []map[string]any{ { "height": int64(1), @@ -214,10 +214,10 @@ func TestQueryCommitsWithGroupByDocID(t *testing.T) { }`, Results: []map[string]any{ { - "docID": "bae-72f3dc53-1846-55d5-915c-28c4e83cc891", + "docID": "bae-a839588e-e2e5-5ede-bb91-ffe6871645cb", }, { - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", }, }, }, diff --git a/tests/integration/query/inline_array/with_group_test.go b/tests/integration/query/inline_array/with_group_test.go index a21a34d214..8ab6eb356e 100644 --- a/tests/integration/query/inline_array/with_group_test.go +++ b/tests/integration/query/inline_array/with_group_test.go @@ -86,22 +86,22 @@ func TestQueryInlineArrayWithGroupByArray(t *testing.T) { }, Results: []map[string]any{ { - "favouriteIntegers": []int64{-1, 2, -1, 1, 0}, + "favouriteIntegers": []int64{1, 2, 3}, 
"_group": []map[string]any{ { - "name": "Andy", - }, - { - - "name": "Shahzad", + "name": "John", }, }, }, { - "favouriteIntegers": []int64{1, 2, 3}, + "favouriteIntegers": []int64{-1, 2, -1, 1, 0}, "_group": []map[string]any{ { - "name": "John", + "name": "Andy", + }, + { + + "name": "Shahzad", }, }, }, diff --git a/tests/integration/query/latest_commits/with_collectionid_prop_test.go b/tests/integration/query/latest_commits/with_collectionid_prop_test.go index 78ffab9b3c..5509671aef 100644 --- a/tests/integration/query/latest_commits/with_collectionid_prop_test.go +++ b/tests/integration/query/latest_commits/with_collectionid_prop_test.go @@ -37,7 +37,7 @@ func TestQueryLastCommitsWithCollectionIdProperty(t *testing.T) { }, testUtils.Request{ Request: `query { - latestCommits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + latestCommits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { collectionID } }`, @@ -49,7 +49,7 @@ func TestQueryLastCommitsWithCollectionIdProperty(t *testing.T) { }, testUtils.Request{ Request: `query { - latestCommits(docID: "bae-de8c99bf-ee0e-5655-8a72-919c2d459a30") { + latestCommits(docID: "bae-f824cbf5-cc66-5e44-a84f-e71f72ff9841") { collectionID } }`, diff --git a/tests/integration/query/latest_commits/with_doc_id_field_test.go b/tests/integration/query/latest_commits/with_doc_id_field_test.go index 624f318c27..300aabbc46 100644 --- a/tests/integration/query/latest_commits/with_doc_id_field_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_field_test.go @@ -22,7 +22,7 @@ func TestQueryLatestCommitsWithDocIDAndFieldName(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple latest commits query with docID and field name", Request: `query { - latestCommits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "age") { + latestCommits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "age") { cid links { cid @@ -50,7 +50,7 @@ func TestQueryLatestCommitsWithDocIDAndFieldId(t 
*testing.T) { test := testUtils.RequestTestCase{ Description: "Simple latest commits query with docID and field id", Request: `query { - latestCommits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "1") { + latestCommits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "1") { cid links { cid @@ -68,7 +68,7 @@ func TestQueryLatestCommitsWithDocIDAndFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "links": []map[string]any{}, }, }, @@ -83,7 +83,7 @@ func TestQueryLatestCommitsWithDocIDAndCompositeFieldId(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple latest commits query with docID and composite field id", Request: `query { - latestCommits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", fieldId: "C") { + latestCommits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", fieldId: "C") { cid links { cid @@ -101,14 +101,14 @@ func TestQueryLatestCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "links": []map[string]any{ { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "name": "name", }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "name": "age", }, }, diff --git a/tests/integration/query/latest_commits/with_doc_id_prop_test.go b/tests/integration/query/latest_commits/with_doc_id_prop_test.go index 247d536532..5cef0f2d06 100644 --- a/tests/integration/query/latest_commits/with_doc_id_prop_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_prop_test.go @@ -30,13 +30,13 @@ func 
TestQueryLastCommitsWithDocIDProperty(t *testing.T) { }, testUtils.Request{ Request: `query { - latestCommits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + latestCommits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { docID } }`, Results: []map[string]any{ { - "docID": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", + "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", }, }, }, diff --git a/tests/integration/query/latest_commits/with_doc_id_test.go b/tests/integration/query/latest_commits/with_doc_id_test.go index 290dea175d..1eea07f6de 100644 --- a/tests/integration/query/latest_commits/with_doc_id_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_test.go @@ -20,7 +20,7 @@ func TestQueryLatestCommitsWithDocID(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple latest commits query with docID", Request: `query { - latestCommits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + latestCommits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") { cid links { cid @@ -38,14 +38,14 @@ func TestQueryLatestCommitsWithDocID(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "links": []map[string]any{ { - "cid": "bafyreidcls23tu7qwp4siw3avyb42eukovpxg6dqifqruvy5wyc6b2ovvq", + "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", "name": "name", }, { - "cid": "bafyreih5awhipv4pk7truqm3pyyhle7xersbiyzyyacud6c3f7urzutpui", + "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", "name": "age", }, }, @@ -60,7 +60,7 @@ func TestQueryLatestCommitsWithDocIDWithSchemaVersionIDField(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple latest commits query with docID and schema versiion id field", Request: `query { - latestCommits(docID: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7") { + latestCommits(docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3") 
{ cid schemaVersionId } @@ -75,7 +75,7 @@ func TestQueryLatestCommitsWithDocIDWithSchemaVersionIDField(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreid7n6a673spwjwl3ogtuqmrba4i4ntjqvsu4l3spqe6qutdtnqwlq", + "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, diff --git a/tests/integration/query/one_to_many/simple_test.go b/tests/integration/query/one_to_many/simple_test.go index 63f27c3e17..bf80618d83 100644 --- a/tests/integration/query/one_to_many/simple_test.go +++ b/tests/integration/query/one_to_many/simple_test.go @@ -32,15 +32,15 @@ func TestQueryOneToMany(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + 1: { // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, @@ -73,32 +73,32 @@ func TestQueryOneToMany(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, 
"verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -107,6 +107,16 @@ func TestQueryOneToMany(t *testing.T) { }, }, Results: []map[string]any{ + { + "name": "Cornelia Funke", + "age": int64(62), + "published": []map[string]any{ + { + "name": "Theif Lord", + "rating": 4.8, + }, + }, + }, { "name": "John Grisham", "age": int64(65), @@ -121,16 +131,6 @@ func TestQueryOneToMany(t *testing.T) { }, }, }, - { - "name": "Cornelia Funke", - "age": int64(62), - "published": []map[string]any{ - { - "name": "Theif Lord", - "rating": 4.8, - }, - }, - }, }, }, } @@ -159,7 +159,7 @@ func TestQueryOneToManyWithNonExistantParent(t *testing.T) { `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, }, diff --git a/tests/integration/query/one_to_many/with_cid_doc_id_test.go b/tests/integration/query/one_to_many/with_cid_doc_id_test.go index a097e6fe29..d335159697 100644 --- a/tests/integration/query/one_to_many/with_cid_doc_id_test.go +++ b/tests/integration/query/one_to_many/with_cid_doc_id_test.go @@ -24,7 +24,7 @@ import ( // Request: `query { // Book ( // cid: "bafybeicgwjdyqyuntdop5ytpsfrqg5a4t2r25pfv6prfppl5ta5k5altca", -// docID: "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" +// docID: "bae-be6d8024-4953-5a92-84b4-f042d25230c6" // ) { // name // author { @@ -34,15 +34,15 @@ import ( // }`, // Docs: map[int][]string{ // //books -// 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d +// 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 // `{ // "name": "Painted House", // "rating": 4.9, -// "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" +// "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" // }`, // }, // //authors -// 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 +// 1: { // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b // `{ // "name": "John Grisham", // 
"age": 65, @@ -85,16 +85,16 @@ func TestQueryOneToManyWithCidAndDocID(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -104,8 +104,8 @@ func TestQueryOneToManyWithCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreicj6hg76f5hveo5ykaw6kmldtujbmamzyasje6a3gxrro7nlplhba" - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + cid: "bafyreiauqb3yovbcofeysjckmex5xdzd6ilvspvypk7cqooguimi6kac5e" + docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name author { @@ -154,16 +154,16 @@ func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocID(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -179,8 +179,8 @@ func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreicj6hg76f5hveo5ykaw6kmldtujbmamzyasje6a3gxrro7nlplhba", - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + cid: "bafyreiauqb3yovbcofeysjckmex5xdzd6ilvspvypk7cqooguimi6kac5e", + docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name author { @@ -227,16 +227,16 @@ func TestQueryOneToManyWithParentUpdateAndFirstCidAndDocID(t 
*testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -252,8 +252,8 @@ func TestQueryOneToManyWithParentUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreicj6hg76f5hveo5ykaw6kmldtujbmamzyasje6a3gxrro7nlplhba", - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + cid: "bafyreiauqb3yovbcofeysjckmex5xdzd6ilvspvypk7cqooguimi6kac5e", + docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name rating @@ -300,16 +300,16 @@ func TestQueryOneToManyWithParentUpdateAndLastCidAndDocID(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -325,8 +325,8 @@ func TestQueryOneToManyWithParentUpdateAndLastCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreibom3twkrprkfljn4hh6hyenpzofdwhl2qfrnfa4eljikpyexnn2q", - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + cid: "bafyreifc646gthndgspdnkftmeaiowwangpfbtm7bpngosfsd72oul5a3u", + docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name rating diff --git a/tests/integration/query/one_to_many/with_count_filter_test.go b/tests/integration/query/one_to_many/with_count_filter_test.go 
index b367e6c856..4d1a590479 100644 --- a/tests/integration/query/one_to_many/with_count_filter_test.go +++ b/tests/integration/query/one_to_many/with_count_filter_test.go @@ -27,32 +27,32 @@ func TestQueryOneToManyWithCountWithFilter(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -61,14 +61,14 @@ func TestQueryOneToManyWithCountWithFilter(t *testing.T) { }, }, Results: []map[string]any{ - { - "name": "John Grisham", - "_count": 1, - }, { "name": "Cornelia Funke", "_count": 0, }, + { + "name": "John Grisham", + "_count": 1, + }, }, } @@ -89,36 +89,36 @@ func TestQueryOneToManyWithCountWithFilterAndChildFilter(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, 
`{ "name": "The Associate", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -128,23 +128,23 @@ func TestQueryOneToManyWithCountWithFilterAndChildFilter(t *testing.T) { }, Results: []map[string]any{ { - "name": "John Grisham", - "_count": 2, + "name": "Cornelia Funke", + "_count": 1, "published": []map[string]any{ { - "name": "Painted House", - }, - { - "name": "A Time for Mercy", + "name": "Theif Lord", }, }, }, { - "name": "Cornelia Funke", - "_count": 1, + "name": "John Grisham", + "_count": 2, "published": []map[string]any{ { - "name": "Theif Lord", + "name": "Painted House", + }, + { + "name": "A Time for Mercy", }, }, }, diff --git a/tests/integration/query/one_to_many/with_count_limit_offset_test.go b/tests/integration/query/one_to_many/with_count_limit_offset_test.go index c03e6b3950..838f67434d 100644 --- a/tests/integration/query/one_to_many/with_count_limit_offset_test.go +++ b/tests/integration/query/one_to_many/with_count_limit_offset_test.go @@ -34,38 +34,38 @@ func TestQueryOneToManyWithCountAndLimitAndOffset(t *testing.T) { `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Firm", "rating": 4.1, - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Pelican Brief", "rating": 4.0, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -74,23 +74,23 @@ func TestQueryOneToManyWithCountAndLimitAndOffset(t *testing.T) { }, }, Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_count": 1, + "published": []map[string]any{}, + }, { "name": "John Grisham", "_count": 4, "published": []map[string]any{ { - "name": "The Pelican Brief", + "name": "Painted House", }, { - "name": "Painted House", + "name": "The Pelican Brief", }, }, }, - { - "name": "Cornelia Funke", - "_count": 1, - "published": []map[string]any{}, - }, }, } @@ -111,37 +111,37 @@ func TestQueryOneToManyWithCountAndDifferentOffsets(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - 
"author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -151,23 +151,23 @@ func TestQueryOneToManyWithCountAndDifferentOffsets(t *testing.T) { }, Results: []map[string]any{ { - "name": "John Grisham", - "_count": 2, + "name": "Cornelia Funke", + "_count": 0, "published": []map[string]any{ { - "name": "The Associate", - }, - { - "name": "Painted House", + "name": "Theif Lord", }, }, }, { - "name": "Cornelia Funke", - "_count": 0, + "name": "John Grisham", + "_count": 2, "published": []map[string]any{ { - "name": "Theif Lord", + "name": "The Associate", + }, + { + "name": "Painted House", }, }, }, @@ -188,32 +188,32 @@ func TestQueryOneToManyWithCountWithLimitWithOffset(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -222,14 
+222,14 @@ func TestQueryOneToManyWithCountWithLimitWithOffset(t *testing.T) { }, }, Results: []map[string]any{ - { - "name": "John Grisham", - "_count": 1, - }, { "name": "Cornelia Funke", "_count": 0, }, + { + "name": "John Grisham", + "_count": 1, + }, }, } diff --git a/tests/integration/query/one_to_many/with_count_limit_test.go b/tests/integration/query/one_to_many/with_count_limit_test.go index ee282d901f..3badad8ef8 100644 --- a/tests/integration/query/one_to_many/with_count_limit_test.go +++ b/tests/integration/query/one_to_many/with_count_limit_test.go @@ -30,32 +30,32 @@ func TestQueryOneToManyWithCountAndLimit(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -65,20 +65,20 @@ func TestQueryOneToManyWithCountAndLimit(t *testing.T) { }, Results: []map[string]any{ { - "name": "John Grisham", - "_count": 2, + "name": "Cornelia Funke", + "_count": 1, "published": []map[string]any{ { - "name": "Painted House", + "name": "Theif Lord", }, }, }, { - "name": "Cornelia Funke", - "_count": 1, + "name": "John Grisham", + "_count": 2, "published": []map[string]any{ { - "name": "Theif 
Lord", + "name": "Painted House", }, }, }, @@ -102,37 +102,37 @@ func TestQueryOneToManyWithCountAndDifferentLimits(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -142,20 +142,20 @@ func TestQueryOneToManyWithCountAndDifferentLimits(t *testing.T) { }, Results: []map[string]any{ { - "name": "John Grisham", - "_count": 2, + "name": "Cornelia Funke", + "_count": 1, "published": []map[string]any{ { - "name": "The Associate", + "name": "Theif Lord", }, }, }, { - "name": "Cornelia Funke", - "_count": 1, + "name": "John Grisham", + "_count": 2, "published": []map[string]any{ { - "name": "Theif Lord", + "name": "The Associate", }, }, }, @@ -176,32 +176,32 @@ func TestQueryOneToManyWithCountWithLimit(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -211,11 +211,11 @@ func TestQueryOneToManyWithCountWithLimit(t *testing.T) { }, Results: []map[string]any{ { - "name": "John Grisham", + "name": "Cornelia Funke", "_count": 1, }, { - "name": "Cornelia Funke", + "name": "John Grisham", "_count": 1, }, }, diff --git a/tests/integration/query/one_to_many/with_count_test.go b/tests/integration/query/one_to_many/with_count_test.go index f525e215e3..0efb541d85 100644 --- a/tests/integration/query/one_to_many/with_count_test.go +++ b/tests/integration/query/one_to_many/with_count_test.go @@ -53,32 +53,32 @@ func TestQueryOneToManyWithCount(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // 
bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -87,14 +87,14 @@ func TestQueryOneToManyWithCount(t *testing.T) { }, }, Results: []map[string]any{ - { - "name": "John Grisham", - "_count": 2, - }, { "name": "Cornelia Funke", "_count": 1, }, + { + "name": "John Grisham", + "_count": 2, + }, }, }, } diff --git a/tests/integration/query/one_to_many/with_doc_id_test.go b/tests/integration/query/one_to_many/with_doc_id_test.go index 34021f53ad..e6e6aa9b90 100644 --- a/tests/integration/query/one_to_many/with_doc_id_test.go +++ b/tests/integration/query/one_to_many/with_doc_id_test.go @@ -23,7 +23,7 @@ func TestQueryOneToManyWithChildDocID(t *testing.T) { Author { name published ( - docID: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" + docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name } @@ -31,20 +31,20 @@ func TestQueryOneToManyWithChildDocID(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d + 0: { // bae-5366ba09-54e8-5381-8169-a770aa9282ae `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + 1: { // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, diff --git a/tests/integration/query/one_to_many/with_doc_ids_test.go b/tests/integration/query/one_to_many/with_doc_ids_test.go index 821a24c334..6e61902c79 100644 --- a/tests/integration/query/one_to_many/with_doc_ids_test.go +++ 
b/tests/integration/query/one_to_many/with_doc_ids_test.go @@ -23,7 +23,7 @@ func TestQueryOneToManyWithChildDocIDs(t *testing.T) { Author { name published ( - docIDs: ["bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d", "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca"] + docIDs: ["bae-5366ba09-54e8-5381-8169-a770aa9282ae", "bae-1ccf3043-d760-543e-be1b-6691fa6aa7a8"] ) { name } @@ -32,31 +32,31 @@ func TestQueryOneToManyWithChildDocIDs(t *testing.T) { Docs: map[int][]string{ //books 0: { - // bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d + // bae-5366ba09-54e8-5381-8169-a770aa9282ae `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, - // bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca + // bae-1ccf3043-d760-543e-be1b-6691fa6aa7a8 `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Firm", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + 1: { // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, diff --git a/tests/integration/query/one_to_many/with_filter_related_id_test.go b/tests/integration/query/one_to_many/with_filter_related_id_test.go index 98c3af4b59..f2e456143c 100644 --- a/tests/integration/query/one_to_many/with_filter_related_id_test.go +++ b/tests/integration/query/one_to_many/with_filter_related_id_test.go @@ -22,60 +22,60 @@ func TestQueryFromManySideWithEqFilterOnRelatedType(t *testing.T) { Description: "One-to-many query from many side with _eq filter on related field type.", Request: `query { - 
Book(filter: {author: {_docID: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"}}}) { + Book(filter: {author: {_docID: {_eq: "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b"}}}) { name } }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -99,60 +99,60 @@ func TestQueryFromManySideWithFilterOnRelatedObjectID(t *testing.T) { Description: "One-to-many query from many side with 
filter on related field.", Request: `query { - Book(filter: {author_id: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"}}) { + Book(filter: {author_id: {_eq: "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b"}}) { name } }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -178,8 +178,8 @@ func TestQueryFromManySideWithSameFiltersInDifferentWayOnRelatedType(t *testing. 
Request: `query { Book( filter: { - author: {_docID: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"}}, - author_id: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"} + author: {_docID: {_eq: "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b"}}, + author_id: {_eq: "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b"} } ) { name @@ -188,53 +188,53 @@ func TestQueryFromManySideWithSameFiltersInDifferentWayOnRelatedType(t *testing. Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + 
// bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -258,60 +258,60 @@ func TestQueryFromSingleSideWithEqFilterOnRelatedType(t *testing.T) { Description: "One-to-many query from single side with _eq filter on related field type.", Request: `query { - Author(filter: {published: {_docID: {_eq: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d"}}}) { + Author(filter: {published: {_docID: {_eq: "bae-96c9de0f-2903-5589-9604-b42882afde8c"}}}) { name } }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, 
"verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -335,60 +335,60 @@ func TestQueryFromSingleSideWithFilterOnRelatedObjectID_Error(t *testing.T) { Description: "One-to-many query from single side with filter on related field.", Request: `query { - Author(filter: {published_id: {_eq: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d"}}) { + Author(filter: {published_id: {_eq: "bae-5366ba09-54e8-5381-8169-a770aa9282ae"}}) { name } }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // 
bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -397,7 +397,7 @@ func TestQueryFromSingleSideWithFilterOnRelatedObjectID_Error(t *testing.T) { }, }, - ExpectedError: "Argument \"filter\" has invalid value {published_id: {_eq: \"bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d\"}}.\nIn field \"published_id\": Unknown field.", + ExpectedError: "Argument \"filter\" has invalid value {published_id: {_eq: \"bae-5366ba09-54e8-5381-8169-a770aa9282ae\"}}.\nIn field \"published_id\": Unknown field.", } executeTestCase(t, test) diff --git a/tests/integration/query/one_to_many/with_filter_test.go b/tests/integration/query/one_to_many/with_filter_test.go index ce019f2afa..94c971c7a8 100644 --- a/tests/integration/query/one_to_many/with_filter_test.go +++ b/tests/integration/query/one_to_many/with_filter_test.go @@ -25,11 +25,11 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParent(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -37,7 +37,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParent(t *testing.T) { Doc: `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -45,12 +45,12 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParent(t *testing.T) { Doc: `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, testUtils.CreateDoc{ CollectionID: 
1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -59,7 +59,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParent(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -109,11 +109,11 @@ func TestQueryOneToManyWithNumericGreaterThanChildFilterOnParentWithUnrenderedCh }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -121,7 +121,7 @@ func TestQueryOneToManyWithNumericGreaterThanChildFilterOnParentWithUnrenderedCh Doc: `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -129,12 +129,12 @@ func TestQueryOneToManyWithNumericGreaterThanChildFilterOnParentWithUnrenderedCh Doc: `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -143,7 +143,7 @@ func TestQueryOneToManyWithNumericGreaterThanChildFilterOnParentWithUnrenderedCh }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -177,11 +177,11 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndChild(t *testing.T }, testUtils.CreateDoc{ CollectionID: 
0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -189,7 +189,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndChild(t *testing.T Doc: `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -197,12 +197,12 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndChild(t *testing.T Doc: `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -211,7 +211,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndChild(t *testing.T }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -257,11 +257,11 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -269,7 +269,7 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { Doc: `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, 
testUtils.CreateDoc{ @@ -277,12 +277,12 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { Doc: `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -291,7 +291,7 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -314,6 +314,17 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { } }`, Results: []map[string]any{ + { + "name": "Cornelia Funke", + "age": int64(62), + "p1": []map[string]any{ + { + "name": "Theif Lord", + "rating": 4.8, + }, + }, + "p2": []map[string]any{}, + }, { "name": "John Grisham", "age": int64(65), @@ -330,17 +341,6 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { }, }, }, - { - "name": "Cornelia Funke", - "age": int64(62), - "p1": []map[string]any{ - { - "name": "Theif Lord", - "rating": 4.8, - }, - }, - "p2": []map[string]any{}, - }, }, }, }, @@ -361,7 +361,7 @@ func TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { Doc: `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -369,7 +369,7 @@ func TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { Doc: `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, testUtils.CreateDoc{ @@ -377,7 +377,7 @@ func 
TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { Doc: `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, testUtils.CreateDoc{ @@ -385,12 +385,12 @@ func TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { Doc: `{ "name": "The Lord of the Rings", "rating": 5.0, - "author_id": "bae-61d279c1-eab9-56ec-8654-dce0324ebfda" + "author_id": "bae-6bf29c1c-7112-5f4f-bfae-1c039479acf6" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b Doc: `{ "name": "John Grisham", "age": 65, @@ -399,7 +399,7 @@ func TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -408,7 +408,7 @@ func TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-61d279c1-eab9-56ec-8654-dce0324ebfda + // bae-6bf29c1c-7112-5f4f-bfae-1c039479acf6 Doc: `{ "name": "John Tolkien", "age": 70, @@ -432,10 +432,10 @@ func TestQueryOneToManyWithCompoundOperatorInFilterAndRelation(t *testing.T) { }`, Results: []map[string]any{ { - "name": "John Grisham", + "name": "Cornelia Funke", }, { - "name": "Cornelia Funke", + "name": "John Grisham", }, }, }, @@ -464,41 +464,8 @@ func TestQueryOneToMany_WithCompoundOperatorInFilterAndRelationAndCaseInsensitiv testUtils.SchemaUpdate{ Schema: bookAuthorGQLSchema, }, - testUtils.CreateDoc{ - CollectionID: 0, - Doc: `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - }, - testUtils.CreateDoc{ - CollectionID: 0, - Doc: `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - }, - testUtils.CreateDoc{ - CollectionID: 0, - Doc: `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - testUtils.CreateDoc{ - CollectionID: 0, - Doc: `{ - "name": "The Lord of the Rings", - "rating": 5.0, - "author_id": "bae-61d279c1-eab9-56ec-8654-dce0324ebfda" - }`, - }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 Doc: `{ "name": "John Grisham", "age": 65, @@ -507,7 +474,6 @@ func TestQueryOneToMany_WithCompoundOperatorInFilterAndRelationAndCaseInsensitiv }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -516,13 +482,44 @@ func TestQueryOneToMany_WithCompoundOperatorInFilterAndRelationAndCaseInsensitiv }, testUtils.CreateDoc{ CollectionID: 1, - // bae-61d279c1-eab9-56ec-8654-dce0324ebfda Doc: `{ "name": "John Tolkien", "age": 70, "verified": true }`, }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Lord of the Rings", + "rating": 5.0, + "author_id": testUtils.NewDocIndex(1, 2), + }, + }, testUtils.Request{ Request: `query { Author(filter: {_or: [ @@ -540,10 +537,10 @@ func TestQueryOneToMany_WithCompoundOperatorInFilterAndRelationAndCaseInsensitiv }`, Results: []map[string]any{ { - "name": "John Grisham", + "name": "Cornelia Funke", }, { - "name": "Cornelia Funke", + "name": "John Grisham", }, }, }, 
diff --git a/tests/integration/query/one_to_many/with_group_filter_test.go b/tests/integration/query/one_to_many/with_group_filter_test.go index 05e5b1c573..6005ef2def 100644 --- a/tests/integration/query/one_to_many/with_group_filter_test.go +++ b/tests/integration/query/one_to_many/with_group_filter_test.go @@ -34,53 +34,53 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnJoin(t *testing }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + 
// bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -92,10 +92,6 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnJoin(t *testing { "age": int64(327), "_group": []map[string]any{ - { - "name": "Simon Pelloutier", - "published": []map[string]any{}, - }, { "name": "Voltaire", "published": []map[string]any{ @@ -109,6 +105,10 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnJoin(t *testing }, }, }, + { + "name": "Simon Pelloutier", + "published": []map[string]any{}, + }, }, }, { @@ -152,53 +152,53 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnGroup(t *testin }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ 
"name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -274,53 +274,53 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnGroupAndOnGroup }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // 
bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -332,10 +332,6 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnGroupAndOnGroup { "age": int64(327), "_group": []map[string]any{ - { - "name": "Simon Pelloutier", - "published": []map[string]any{}, - }, { "name": "Voltaire", "published": []map[string]any{ @@ -345,6 +341,10 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnGroupAndOnGroup }, }, }, + { + "name": "Simon Pelloutier", + "published": []map[string]any{}, + }, }, }, }, diff --git a/tests/integration/query/one_to_many/with_group_related_id_alias_test.go b/tests/integration/query/one_to_many/with_group_related_id_alias_test.go index bef01aee48..fdd50743c4 100644 --- a/tests/integration/query/one_to_many/with_group_related_id_alias_test.go +++ b/tests/integration/query/one_to_many/with_group_related_id_alias_test.go @@ -16,142 +16,174 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration" ) -// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. 
func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *testing.T) { - test := testUtils.RequestTestCase{ - + test := testUtils.TestCase{ Description: "One-to-many query with groupBy on related field alias (from many side).", - - Request: `query { - Book(groupBy: [author]) { - author_id - _group { - name - rating - author { - name - age + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - } - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Client", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Candide", - "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Zadig", - "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" - }`, + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Simon Pelloutier", "age": 327, "verified": true }`, }, - }, - Results: 
[]map[string]any{ - { - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", - "_group": []map[string]any{ - { - "name": "Candide", - "rating": 4.95, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - { - "name": "Zadig", - "rating": 4.91, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), }, }, - { - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "_group": []map[string]any{ - { - "name": "The Client", - "rating": 4.5, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", - }, - }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Client", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Candide", + "rating": 4.95, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Zadig", + "rating": 4.91, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": testUtils.NewDocIndex(1, 2), + }, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author]) { + author_id + _group { + name + rating + author { + name + age + } + } + } + }`, + Results: []map[string]any{ { - "name": "Painted House", - "rating": 4.9, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "author_id": 
"bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, }, }, { - "name": "A Time for Mercy", - "rating": 4.5, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": int64(327), + "name": "Simon Pelloutier", + }, + }, }, }, - }, - }, - { - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", - "_group": []map[string]any{ { - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2.0, - "author": map[string]any{ - "age": int64(327), - "name": "Simon Pelloutier", + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, }, }, }, @@ -159,157 +191,189 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *t }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } -// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. 
func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRelatedSelection(t *testing.T) { - test := testUtils.RequestTestCase{ - + test := testUtils.TestCase{ Description: "One-to-many query with groupBy on related field alias (from many side).", - - Request: `query { - Book(groupBy: [author]) { - author { - _docID - name - } - _group { - name - rating - author { - name - age + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - } - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Client", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Candide", - "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Zadig", - "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" - }`, + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Simon Pelloutier", "age": 327, "verified": 
true }`, }, - }, - Results: []map[string]any{ - { - "author": map[string]any{ - "name": "Voltaire", - "_docID": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), }, - "_group": []map[string]any{ - { - "name": "Candide", - "rating": 4.95, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - { - "name": "Zadig", - "rating": 4.91, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), }, }, - { - "author": map[string]any{ - "name": "John Grisham", - "_docID": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Client", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), }, - "_group": []map[string]any{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Candide", + "rating": 4.95, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Zadig", + "rating": 4.91, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": testUtils.NewDocIndex(1, 2), + }, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author]) { + author { + _docID + name + } + _group { + name + rating + author { + name + age + } + } + } + }`, + Results: []map[string]any{ { - "name": "The Client", - "rating": 4.5, "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "name": "John 
Grisham", + "_docID": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", }, - }, - { - "name": "Painted House", - "rating": 4.9, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, }, }, { - "name": "A Time for Mercy", - "rating": 4.5, "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "name": "Simon Pelloutier", + "_docID": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", + }, + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": int64(327), + "name": "Simon Pelloutier", + }, + }, }, }, - }, - }, - { - "author": map[string]any{ - "name": "Simon Pelloutier", - "_docID": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", - }, - "_group": []map[string]any{ { - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2.0, "author": map[string]any{ - "age": int64(327), - "name": "Simon Pelloutier", + "name": "Voltaire", + "_docID": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", + }, + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, }, }, }, @@ -317,144 +381,177 @@ func 
TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRe }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySideUsingAlias(t *testing.T) { - test := testUtils.RequestTestCase{ - + test := testUtils.TestCase{ Description: "One-to-many query with groupBy on related field alias, with id selection & related selection (from many side).", - - Request: `query { - Book(groupBy: [author]) { - author_id - _group { - name - rating - author { - name - age + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - } - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Client", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Candide", - "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Zadig", - "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" - }`, + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Voltaire", "age": 327, "verified": true 
}`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Simon Pelloutier", "age": 327, "verified": true }`, }, - }, - Results: []map[string]any{ - { - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", - "_group": []map[string]any{ - { - "name": "Candide", - "rating": 4.95, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - { - "name": "Zadig", - "rating": 4.91, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), }, }, - { - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "_group": []map[string]any{ - { - "name": "The Client", - "rating": 4.5, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", - }, - }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Client", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Candide", + "rating": 4.95, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Zadig", + "rating": 4.91, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": testUtils.NewDocIndex(1, 2), + }, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author]) { + author_id + _group { + name + rating + author { + name + 
age + } + } + } + }`, + Results: []map[string]any{ { - "name": "Painted House", - "rating": 4.9, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, }, }, { - "name": "A Time for Mercy", - "rating": 4.5, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": int64(327), + "name": "Simon Pelloutier", + }, + }, }, }, - }, - }, - { - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", - "_group": []map[string]any{ { - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2.0, - "author": map[string]any{ - "age": int64(327), - "name": "Simon Pelloutier", + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, }, }, }, @@ -462,160 +559,193 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, 
test) } func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySideUsingAliasAndRelatedSelection(t *testing.T) { - test := testUtils.RequestTestCase{ - + test := testUtils.TestCase{ Description: "One-to-many query with groupBy on related field alias, with id selection & related selection (from many side).", - - Request: `query { - Book(groupBy: [author]) { - author_id - author { - _docID - name - } - _group { - name - rating - author { - name - age + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - } - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Client", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Candide", - "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Zadig", - "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" - }`, - `{ - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" - }`, + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c - `{ + }, + testUtils.CreateDoc{ + 
CollectionID: 1, + Doc: `{ "name": "Simon Pelloutier", "age": 327, "verified": true }`, }, - }, - Results: []map[string]any{ - { - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", - "author": map[string]any{ - "name": "Voltaire", - "_docID": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), }, - "_group": []map[string]any{ - { - "name": "Candide", - "rating": 4.95, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - { - "name": "Zadig", - "rating": 4.91, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), }, }, - { - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "author": map[string]any{ - "name": "John Grisham", - "_docID": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Client", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), }, - "_group": []map[string]any{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Candide", + "rating": 4.95, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Zadig", + "rating": 4.91, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": testUtils.NewDocIndex(1, 2), + }, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author]) { + author_id + author { + _docID + name + } + _group { + name + 
rating + author { + name + age + } + } + } + }`, + Results: []map[string]any{ { - "name": "The Client", - "rating": 4.5, + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "name": "John Grisham", + "_docID": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", }, - }, - { - "name": "Painted House", - "rating": 4.9, - "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": int64(65), + "name": "John Grisham", + }, + }, }, }, { - "name": "A Time for Mercy", - "rating": 4.5, + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", "author": map[string]any{ - "age": int64(65), - "name": "John Grisham", + "name": "Simon Pelloutier", + "_docID": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", + }, + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": int64(327), + "name": "Simon Pelloutier", + }, + }, }, }, - }, - }, - { - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", - "author": map[string]any{ - "name": "Simon Pelloutier", - "_docID": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", - }, - "_group": []map[string]any{ { - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": 2.0, + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", "author": map[string]any{ - "age": int64(327), - "name": "Simon Pelloutier", + "name": 
"Voltaire", + "_docID": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", + }, + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, }, }, }, @@ -623,10 +753,9 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } -// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromSingleSideUsingAlias(t *testing.T) { test := testUtils.RequestTestCase{ Description: "One-to-many query with groupBy on related id field alias (from single side).", @@ -639,53 +768,53 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromSingleSideUsingAlias(t }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les 
temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -713,53 +842,53 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromSingleSi }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // 
bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, diff --git a/tests/integration/query/one_to_many/with_group_related_id_test.go b/tests/integration/query/one_to_many/with_group_related_id_test.go index 4eec467480..673ccfddad 100644 --- a/tests/integration/query/one_to_many/with_group_related_id_test.go +++ b/tests/integration/query/one_to_many/with_group_related_id_test.go @@ -16,7 +16,6 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration" ) -// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T) { test := testUtils.RequestTestCase{ Description: "One-to-many query with groupBy on related id (from many side).", @@ -35,53 +34,53 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": 
"bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -91,28 +90,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T }, Results: []map[string]any{ { - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", - "_group": []map[string]any{ - { - "name": "Candide", - "rating": 4.95, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - { - "name": "Zadig", - "rating": 4.91, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - }, - }, - { - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", "_group": []map[string]any{ { "name": "The Client", @@ -141,7 +119,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T }, }, { - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", "_group": []map[string]any{ { "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", @@ 
-153,6 +131,27 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T }, }, }, + { + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + }, + }, }, } @@ -177,53 +176,53 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // 
bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -233,28 +232,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi }, Results: []map[string]any{ { - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", - "_group": []map[string]any{ - { - "name": "Candide", - "rating": 4.95, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - { - "name": "Zadig", - "rating": 4.91, - "author": map[string]any{ - "age": int64(327), - "name": "Voltaire", - }, - }, - }, - }, - { - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", "_group": []map[string]any{ { "name": "The Client", @@ -283,7 +261,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi }, }, { - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", "_group": []map[string]any{ { "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", @@ -295,13 +273,33 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi }, }, }, + { + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": int64(327), + "name": "Voltaire", + }, + }, + }, + }, }, } executeTestCase(t, test) } -// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. 
func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromSingleSide(t *testing.T) { test := testUtils.RequestTestCase{ Description: "One-to-many query with groupBy on related id (from single side).", @@ -318,53 +316,53 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromSingleSide(t *testing.T }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -396,53 +394,53 @@ 
func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromSingleSi }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, diff --git a/tests/integration/query/one_to_many/with_group_test.go b/tests/integration/query/one_to_many/with_group_test.go index b56a6f5cea..0cb74a4492 100644 --- a/tests/integration/query/one_to_many/with_group_test.go +++ 
b/tests/integration/query/one_to_many/with_group_test.go @@ -34,37 +34,37 @@ func TestQueryOneToManyWithInnerJoinGroupNumber(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -73,6 +73,20 @@ func TestQueryOneToManyWithInnerJoinGroupNumber(t *testing.T) { }, }, Results: []map[string]any{ + { + "name": "Cornelia Funke", + "age": int64(62), + "published": []map[string]any{ + { + "rating": 4.8, + "_group": []map[string]any{ + { + "name": "Theif Lord", + }, + }, + }, + }, + }, { "name": "John Grisham", "age": int64(65), @@ -98,20 +112,6 @@ func TestQueryOneToManyWithInnerJoinGroupNumber(t *testing.T) { }, }, }, - { - "name": "Cornelia Funke", - "age": int64(62), - "published": []map[string]any{ - { - "rating": 4.8, - "_group": []map[string]any{ - { - "name": "Theif Lord", - }, - }, - }, - }, - }, }, }, } @@ -122,10 +122,9 @@ func TestQueryOneToManyWithInnerJoinGroupNumber(t *testing.T) { } func 
TestQueryOneToManyWithParentJoinGroupNumber(t *testing.T) { - tests := []testUtils.RequestTestCase{ - { - Description: "One-to-many relation query from many side with parent level group", - Request: `query { + test := testUtils.RequestTestCase{ + Description: "One-to-many relation query from many side with parent level group", + Request: `query { Author (groupBy: [age]) { age _group { @@ -137,108 +136,107 @@ func TestQueryOneToManyWithParentJoinGroupNumber(t *testing.T) { } } }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ + Docs: map[int][]string{ + //books + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 + `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, - `{ + `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, - `{ + `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, - `{ + `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, - `{ + `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, - `{ + `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, - }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + }, + //authors + 1: { + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b + `{ "name": "John Grisham", "age": 65, "verified": true }`, - // 
bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 - `{ + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c + `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c - `{ + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 + `{ "name": "Simon Pelloutier", "age": 327, "verified": true }`, - }, }, - Results: []map[string]any{ - { - "age": int64(327), - "_group": []map[string]any{ - { - "name": "Simon Pelloutier", - "published": []map[string]any{ - { - "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "rating": float64(2), - }, + }, + Results: []map[string]any{ + { + "age": int64(327), + "_group": []map[string]any{ + { + "name": "Voltaire", + "published": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + }, + { + "name": "Zadig", + "rating": 4.91, }, }, - { - "name": "Voltaire", - "published": []map[string]any{ - { - "name": "Candide", - "rating": 4.95, - }, - { - "name": "Zadig", - "rating": 4.91, - }, + }, + { + "name": "Simon Pelloutier", + "published": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": float64(2), }, }, }, }, - { - "age": int64(65), - "_group": []map[string]any{ - { - "name": "John Grisham", - "published": []map[string]any{ - { - "name": "The Client", - "rating": 4.5, - }, - { - "name": "Painted House", - "rating": 4.9, - }, - { - "name": "A Time for Mercy", - "rating": 4.5, - }, + }, + { + "age": int64(65), + "_group": []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + }, + { + "name": "Painted House", + "rating": 4.9, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, }, }, }, @@ -247,9 +245,7 @@ func TestQueryOneToManyWithParentJoinGroupNumber(t *testing.T) { }, } - for _, test := range tests { - 
executeTestCase(t, test) - } + executeTestCase(t, test) } func TestQueryOneToManyWithInnerJoinGroupNumberWithNonGroupFieldsSelected(t *testing.T) { diff --git a/tests/integration/query/one_to_many/with_limit_test.go b/tests/integration/query/one_to_many/with_limit_test.go index 297fd56896..6b518de6e1 100644 --- a/tests/integration/query/one_to_many/with_limit_test.go +++ b/tests/integration/query/one_to_many/with_limit_test.go @@ -30,32 +30,32 @@ func TestQueryOneToManyWithSingleChildLimit(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -65,20 +65,20 @@ func TestQueryOneToManyWithSingleChildLimit(t *testing.T) { }, Results: []map[string]any{ { - "name": "John Grisham", + "name": "Cornelia Funke", "published": []map[string]any{ { - "name": "Painted House", - "rating": 4.9, + "name": "Theif Lord", + "rating": 4.8, }, }, }, { - "name": "Cornelia Funke", + "name": "John Grisham", "published": []map[string]any{ { - "name": "Theif Lord", - "rating": 4.8, + "name": "Painted House", + "rating": 4.9, }, }, }, @@ -106,32 +106,32 @@ func TestQueryOneToManyWithMultipleChildLimits(t *testing.T) { 
}`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -141,36 +141,36 @@ func TestQueryOneToManyWithMultipleChildLimits(t *testing.T) { }, Results: []map[string]any{ { - "name": "John Grisham", + "name": "Cornelia Funke", "p1": []map[string]any{ { - "name": "Painted House", - "rating": 4.9, + "name": "Theif Lord", + "rating": 4.8, }, }, "p2": []map[string]any{ { - "name": "Painted House", - "rating": 4.9, - }, - { - "name": "A Time for Mercy", - "rating": 4.5, + "name": "Theif Lord", + "rating": 4.8, }, }, }, { - "name": "Cornelia Funke", + "name": "John Grisham", "p1": []map[string]any{ { - "name": "Theif Lord", - "rating": 4.8, + "name": "Painted House", + "rating": 4.9, }, }, "p2": []map[string]any{ { - "name": "Theif Lord", - "rating": 4.8, + "name": "Painted House", + "rating": 4.9, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, }, }, }, diff --git a/tests/integration/query/one_to_many/with_order_filter_limit_test.go b/tests/integration/query/one_to_many/with_order_filter_limit_test.go index 8acee4db18..46c27c474e 100644 --- 
a/tests/integration/query/one_to_many/with_order_filter_limit_test.go +++ b/tests/integration/query/one_to_many/with_order_filter_limit_test.go @@ -33,32 +33,32 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndNumericSortAscendi }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -100,32 +100,32 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndNumericSortDescend }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // 
bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, diff --git a/tests/integration/query/one_to_many/with_order_filter_test.go b/tests/integration/query/one_to_many/with_order_filter_test.go index a7de6d5977..225d25a7f4 100644 --- a/tests/integration/query/one_to_many/with_order_filter_test.go +++ b/tests/integration/query/one_to_many/with_order_filter_test.go @@ -37,28 +37,28 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndNumericSortAscendi `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -102,32 +102,32 @@ func TestQueryOneToManyWithNumericGreaterThanFilterAndNumericSortDescendingOnChi }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" 
}`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -136,6 +136,16 @@ func TestQueryOneToManyWithNumericGreaterThanFilterAndNumericSortDescendingOnChi }, }, Results: []map[string]any{ + { + "name": "Cornelia Funke", + "age": int64(62), + "published": []map[string]any{ + { + "name": "Theif Lord", + "rating": 4.8, + }, + }, + }, { "name": "John Grisham", "age": int64(65), @@ -150,16 +160,6 @@ func TestQueryOneToManyWithNumericGreaterThanFilterAndNumericSortDescendingOnChi }, }, }, - { - "name": "Cornelia Funke", - "age": int64(62), - "published": []map[string]any{ - { - "name": "Theif Lord", - "rating": 4.8, - }, - }, - }, }, } diff --git a/tests/integration/query/one_to_many/with_related_id_test.go b/tests/integration/query/one_to_many/with_related_id_test.go index bcfef26cfe..8626ffddd9 100644 --- a/tests/integration/query/one_to_many/with_related_id_test.go +++ b/tests/integration/query/one_to_many/with_related_id_test.go @@ -30,54 +30,54 @@ func TestQueryOneToManyWithRelatedTypeIDFromManySide(t *testing.T) { Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": "Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, @@ -88,28 +88,28 @@ func TestQueryOneToManyWithRelatedTypeIDFromManySide(t *testing.T) { Results: []map[string]any{ { - "name": "Candide", - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + "name": "The Client", + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", }, { - "name": "Zadig", - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + "name": "Painted House", + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", }, { - "name": "The Client", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "name": "A Time for Mercy", + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", }, { "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + 
"author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614", }, { - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "name": "Candide", + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", }, { - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "name": "Zadig", + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c", }, }, } @@ -131,54 +131,54 @@ func TestQueryOneToManyWithRelatedTypeIDFromSingleSide(t *testing.T) { Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Client", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Candide", "rating": 4.95, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Zadig", "rating": 4.91, - "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + "author_id": "bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c" }`, `{ "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2, - "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + "author_id": "bae-34a9bd41-1f0d-5748-8446-48fc36ef2614" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + // bae-1594d2aa-d63c-51d2-8e5e-06ee0c9e2e8c `{ "name": 
"Voltaire", "age": 327, "verified": true }`, - // bae-09d33399-197a-5b98-b135-4398f2b6de4c + // bae-34a9bd41-1f0d-5748-8446-48fc36ef2614 `{ "name": "Simon Pelloutier", "age": 327, diff --git a/tests/integration/query/one_to_many/with_same_field_name_test.go b/tests/integration/query/one_to_many/with_same_field_name_test.go index dfc609518b..10136ae529 100644 --- a/tests/integration/query/one_to_many/with_same_field_name_test.go +++ b/tests/integration/query/one_to_many/with_same_field_name_test.go @@ -46,14 +46,14 @@ func TestQueryOneToManyWithSameFieldName(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-9217906d-e8c5-533d-8520-71c754590844 + 0: { `{ "name": "Painted House", - "relationship1_id": "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + "relationship1_id": "bae-ee5973cf-73c3-558f-8aec-8b590b8e77cf" }`, }, //authors - 1: { // bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed + 1: { // bae-ee5973cf-73c3-558f-8aec-8b590b8e77cf `{ "name": "John Grisham" }`, @@ -80,14 +80,14 @@ func TestQueryOneToManyWithSameFieldName(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-9217906d-e8c5-533d-8520-71c754590844 + 0: { `{ "name": "Painted House", - "relationship1_id": "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + "relationship1_id": "bae-ee5973cf-73c3-558f-8aec-8b590b8e77cf" }`, }, //authors - 1: { // bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed + 1: { // bae-ee5973cf-73c3-558f-8aec-8b590b8e77cf `{ "name": "John Grisham" }`, diff --git a/tests/integration/query/one_to_many/with_sum_filter_order_test.go b/tests/integration/query/one_to_many/with_sum_filter_order_test.go index af01cdb8b1..96f2c14fc1 100644 --- a/tests/integration/query/one_to_many/with_sum_filter_order_test.go +++ b/tests/integration/query/one_to_many/with_sum_filter_order_test.go @@ -28,47 +28,47 @@ func TestOneToManyAscOrderAndFilterOnParentWithAggSumOnSubTypeField(t *testing.T Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // 
bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Sooley", "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Rooster Bar", "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -93,7 +93,7 @@ func TestOneToManyAscOrderAndFilterOnParentWithAggSumOnSubTypeField(t *testing.T }, { "name": "John Grisham", - "_sum": 20.799999999999997, + "_sum": 20.8, }, { "name": "Not a Writer", @@ -117,47 +117,47 @@ func TestOneToManyDescOrderAndFilterOnParentWithAggSumOnSubTypeField(t *testing. 
Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Sooley", "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Rooster Bar", "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -182,7 +182,7 @@ func TestOneToManyDescOrderAndFilterOnParentWithAggSumOnSubTypeField(t *testing. 
}, { "name": "John Grisham", - "_sum": 20.799999999999997, + "_sum": 20.8, }, { "name": "Cornelia Funke", @@ -207,47 +207,47 @@ func TestOnetoManySumBySubTypeFieldAndSumBySybTypeFieldWithDescOrderingOnFieldWi Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Sooley", "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Rooster Bar", "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -276,16 +276,16 @@ func TestOnetoManySumBySubTypeFieldAndSumBySybTypeFieldWithDescOrderingOnFieldWi "sum1": 0.0, "sum2": 0.0, }, - { - "name": "John Grisham", - "sum1": 20.799999999999997, - "sum2": 4.9 + 4.5, - }, { "name": "Cornelia Funke", "sum1": 4.8, "sum2": 4.8, }, + { + "name": "John Grisham", + "sum1": 20.8, + "sum2": 4.9 + 4.5, + }, }, } @@ -305,47 +305,47 @@ func 
TestOnetoManySumBySubTypeFieldAndSumBySybTypeFieldWithAscOrderingOnFieldWit Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Sooley", "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Rooster Bar", "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -374,16 +374,16 @@ func TestOnetoManySumBySubTypeFieldAndSumBySybTypeFieldWithAscOrderingOnFieldWit "sum1": 0.0, "sum2": 0.0, }, - { - "name": "John Grisham", - "sum1": 20.799999999999997, - "sum2": 4.0 + 3.2, - }, { "name": "Cornelia Funke", "sum1": 4.8, "sum2": 4.8, }, + { + "name": "John Grisham", + "sum1": 20.8, + "sum2": 4.0 + 3.2, + }, }, } @@ -404,47 +404,47 @@ func TestOneToManyLimitAscOrderSumOfSubTypeAndLimitAscOrderFieldsOfSubtype(t *te Docs: map[int][]string{ //books - 0: { // 
bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Sooley", "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Rooster Bar", "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -472,21 +472,21 @@ func TestOneToManyLimitAscOrderSumOfSubTypeAndLimitAscOrderFieldsOfSubtype(t *te "LimitOrderFields": []map[string]any{}, }, { - "LimitOrderSum": 3.2 + 4.0, + "LimitOrderSum": 4.8, "LimitOrderFields": []map[string]any{ { - "name": "Sooley", - }, - { - "name": "The Rooster Bar", + "name": "Theif Lord", }, }, }, { - "LimitOrderSum": 4.8, + "LimitOrderSum": 3.2 + 4.0, "LimitOrderFields": []map[string]any{ { - "name": "Theif Lord", + "name": "Sooley", + }, + { + "name": "The Rooster Bar", }, }, }, @@ -510,47 +510,47 @@ func TestOneToManyLimitDescOrderSumOfSubTypeAndLimitAscOrderFieldsOfSubtype(t *t Docs: 
map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Sooley", "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Rooster Bar", "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -578,21 +578,21 @@ func TestOneToManyLimitDescOrderSumOfSubTypeAndLimitAscOrderFieldsOfSubtype(t *t "LimitOrderFields": []map[string]any{}, }, { - "LimitOrderSum": 4.9 + 4.5, + "LimitOrderSum": 4.8, "LimitOrderFields": []map[string]any{ { - "name": "Painted House", - }, - { - "name": "A Time for Mercy", + "name": "Theif Lord", }, }, }, { - "LimitOrderSum": 4.8, + "LimitOrderSum": 4.9 + 4.5, "LimitOrderFields": []map[string]any{ { - "name": "Theif Lord", + "name": "Painted House", + }, + { + "name": "A Time for Mercy", }, }, }, diff --git 
a/tests/integration/query/one_to_many/with_sum_limit_offset_order_test.go b/tests/integration/query/one_to_many/with_sum_limit_offset_order_test.go index 11df5b310c..e7c4606071 100644 --- a/tests/integration/query/one_to_many/with_sum_limit_offset_order_test.go +++ b/tests/integration/query/one_to_many/with_sum_limit_offset_order_test.go @@ -17,398 +17,568 @@ import ( ) func TestQueryOneToManyWithSumWithLimitWithOffsetWithOrderAsc(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sum with limit and offset and order", - Request: `query { - Author { - name - _sum(published: {field: rating, offset: 1, limit: 2, order: {name: ASC}}) - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: 
`{ "name": "Cornelia Funke", "age": 62, "verified": false }`, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - // 4.9 + 3.2 - // ...00001 is float math artifact - "_sum": 8.100000000000001, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, }, - { - "name": "Cornelia Funke", - "_sum": float64(0), + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + _sum(published: {field: rating, offset: 1, limit: 2, order: {name: ASC}}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_sum": float64(0), + }, + { + "name": "John Grisham", + // 4.9 + 3.2 + // ...00001 is float math artifact + "_sum": 8.100000000000001, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyWithSumWithLimitWithOffsetWithOrderDesc(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sum with limit and offset and order", - Request: `query { - Author { - name - _sum(published: {field: rating, 
offset: 1, limit: 2, order: {name: DESC}}) - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Cornelia Funke", "age": 62, "verified": false }`, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - // 4.2 + 3.2 - "_sum": 7.4, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, }, - { - "name": "Cornelia 
Funke", - "_sum": float64(0), + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + _sum(published: {field: rating, offset: 1, limit: 2, order: {name: DESC}}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_sum": float64(0), + }, + { + "name": "John Grisham", + // 4.2 + 3.2 + "_sum": 7.4, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyWithSumWithLimitWithOffsetWithOrderAscAndDesc(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sum with limit and offset and order", - Request: `query { - Author { - name - asc: _sum(published: {field: rating, offset: 1, limit: 2, order: {name: ASC}}) - desc: _sum(published: {field: rating, offset: 1, limit: 2, order: {name: DESC}}) - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Cornelia Funke", "age": 62, "verified": false }`, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - // 4.9 + 3.2 - // ...00001 is float math artifact - "asc": 8.100000000000001, - // 4.2 + 3.2 - "desc": 7.4, - }, - { - "name": "Cornelia Funke", - "asc": float64(0), - "desc": float64(0), + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.Request{ + Request: 
`query { + Author { + name + asc: _sum(published: {field: rating, offset: 1, limit: 2, order: {name: ASC}}) + desc: _sum(published: {field: rating, offset: 1, limit: 2, order: {name: DESC}}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "asc": float64(0), + "desc": float64(0), + }, + { + "name": "John Grisham", + // 4.9 + 3.2 + // ...00001 is float math artifact + "asc": 8.100000000000001, + // 4.2 + 3.2 + "desc": 7.4, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyWithSumWithLimitWithOffsetWithOrderOnDifferentFields(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sum with limit and offset and order", - Request: `query { - Author { - name - byName: _sum(published: {field: rating, offset: 1, limit: 2, order: {name: DESC}}) - byRating: _sum(published: {field: rating, offset: 1, limit: 2, order: {rating: DESC}}) - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - 
//authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Cornelia Funke", "age": 62, "verified": false }`, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - // 4.2 + 3.2 - "byName": 7.4, - // 4.5 + 4.2 - "byRating": 8.7, - }, - { - "name": "Cornelia Funke", - "byName": float64(0), - "byRating": float64(0), + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + byName: _sum(published: {field: rating, offset: 1, limit: 2, order: {name: DESC}}) + byRating: _sum(published: {field: rating, offset: 1, limit: 2, order: {rating: DESC}}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "byName": float64(0), + "byRating": float64(0), + }, + { + "name": "John Grisham", + // 4.2 + 3.2 + "byName": 7.4, + // 4.5 + 4.2 + 
"byRating": 8.7, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyWithSumWithLimitWithOffsetWithOrderDescAndRenderedChildrenOrderedAsc(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sum with limit and offset and order", - Request: `query { - Author { - name - _sum(published: {field: rating, offset: 1, limit: 2, order: {name: DESC}}) - published(offset: 1, limit: 2, order: {name: ASC}) { - name + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author } - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Cornelia Funke", "age": 62, "verified": false }`, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - // 4.2 + 3.2 - "_sum": 7.4, - "published": 
[]map[string]any{ + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + _sum(published: {field: rating, offset: 1, limit: 2, order: {name: DESC}}) + published(offset: 1, limit: 2, order: {name: ASC}) { + name + } + } + }`, + Results: []map[string]any{ { - "name": "Painted House", + "name": "Cornelia Funke", + "_sum": float64(0), + "published": []map[string]any{}, }, { - "name": "Sooley", + "name": "John Grisham", + // 4.2 + 3.2 + "_sum": 7.4, + "published": []map[string]any{ + { + "name": "Painted House", + }, + { + "name": "Sooley", + }, + }, }, }, }, - { - "name": "Cornelia Funke", - "_sum": float64(0), - "published": []map[string]any{}, - }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many/with_sum_limit_offset_test.go b/tests/integration/query/one_to_many/with_sum_limit_offset_test.go index c907ade5dd..ea89759d72 100644 --- a/tests/integration/query/one_to_many/with_sum_limit_offset_test.go +++ 
b/tests/integration/query/one_to_many/with_sum_limit_offset_test.go @@ -27,37 +27,37 @@ func TestQueryOneToManyWithSumWithLimitAndOffset(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -66,14 +66,14 @@ func TestQueryOneToManyWithSumWithLimitAndOffset(t *testing.T) { }, }, Results: []map[string]any{ - { - "name": "John Grisham", - "_sum": 9.4, - }, { "name": "Cornelia Funke", "_sum": float64(0), }, + { + "name": "John Grisham", + "_sum": 9.4, + }, }, } diff --git a/tests/integration/query/one_to_many/with_sum_limit_test.go b/tests/integration/query/one_to_many/with_sum_limit_test.go index e049e77226..f7fe551567 100644 --- a/tests/integration/query/one_to_many/with_sum_limit_test.go +++ b/tests/integration/query/one_to_many/with_sum_limit_test.go @@ -27,37 +27,37 @@ func TestQueryOneToManyWithSumWithLimit(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // 
bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "The Associate", "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, `{ "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + "author_id": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace" }`, }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + // bae-72e8c691-9f20-55e7-9228-8af1cf54cace `{ "name": "Cornelia Funke", "age": 62, @@ -66,15 +66,15 @@ func TestQueryOneToManyWithSumWithLimit(t *testing.T) { }, }, Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_sum": 4.8, + }, { "name": "John Grisham", // .00...1 is float math thing "_sum": 9.100000000000001, }, - { - "name": "Cornelia Funke", - "_sum": 4.8, - }, }, } diff --git a/tests/integration/query/one_to_many/with_typename_test.go b/tests/integration/query/one_to_many/with_typename_test.go index b5dca894e0..db318066f0 100644 --- a/tests/integration/query/one_to_many/with_typename_test.go +++ b/tests/integration/query/one_to_many/with_typename_test.go @@ -31,15 +31,15 @@ func TestQueryOneToManyWithTypeName(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + "author_id": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b" }`, }, //authors - 1: { // 
bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + 1: { // bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b `{ "name": "John Grisham", "age": 65, diff --git a/tests/integration/query/one_to_many_multiple/utils.go b/tests/integration/query/one_to_many_multiple/utils.go deleted file mode 100644 index fe7584a980..0000000000 --- a/tests/integration/query/one_to_many_multiple/utils.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package one_to_many_multiple - -import ( - "testing" - - testUtils "github.com/sourcenetwork/defradb/tests/integration" -) - -var bookAuthorGQLSchema = (` - type Article { - name: String - author: Author - rating: Int - } - - type Book { - name: String - author: Author - score: Int - } - - type Author { - name: String - age: Int - verified: Boolean - books: [Book] - articles: [Article] - } -`) - -func executeTestCase(t *testing.T, test testUtils.RequestTestCase) { - testUtils.ExecuteRequestTestCase(t, bookAuthorGQLSchema, []string{"Article", "Book", "Author"}, test) -} diff --git a/tests/integration/query/one_to_many_multiple/with_average_filter_test.go b/tests/integration/query/one_to_many_multiple/with_average_filter_test.go index b55390efe1..9685790830 100644 --- a/tests/integration/query/one_to_many_multiple/with_average_filter_test.go +++ b/tests/integration/query/one_to_many_multiple/with_average_filter_test.go @@ -17,165 +17,247 @@ import ( ) func TestQueryOneToManyMultipleWithAverageOnMultipleJoinsWithAndWithoutFilter(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with averages with and 
without filters", - Request: `query { - Author { - name - _avg(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, - }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_avg": float64(3), + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + 
DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, }, - { - "name": "Cornelia Funke", - "_avg": float64(2.3333333333333335), + testUtils.Request{ + Request: `query { + Author { + name + _avg(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_avg": float64(2.3333333333333335), + }, + { + "name": "John Grisham", + "_avg": float64(3), + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyMultipleWithAverageOnMultipleJoinsWithFilters(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with averages with filters", - Request: `query { - Author { - name - _avg(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating, filter: {rating: {_lt: 3}}}) - } - 
}`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, - }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_avg": float64(0), + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + 
testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, }, - { - "name": "Cornelia Funke", - "_avg": float64(2.3333333333333335), + testUtils.Request{ + Request: `query { + Author { + name + _avg(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating, filter: {rating: {_lt: 3}}}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_avg": float64(2.3333333333333335), + }, + { + "name": "John Grisham", + "_avg": float64(0), + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_multiple/with_average_test.go b/tests/integration/query/one_to_many_multiple/with_average_test.go index 615782ab4a..a2af6b6a67 100644 --- a/tests/integration/query/one_to_many_multiple/with_average_test.go +++ b/tests/integration/query/one_to_many_multiple/with_average_test.go @@ -17,83 +17,124 @@ import ( ) func TestQueryOneToManyMultipleWithAverageOnMultipleJoins(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ 
Description: "One-to-many relation query from many side with average", - Request: `query { - Author { - name - _avg(books: {field: score}, articles: {field: rating}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_avg": float64(2.25), + testUtils.CreateDoc{ + 
CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, }, - { - "name": "Cornelia Funke", - "_avg": float64(2.3333333333333335), + testUtils.Request{ + Request: `query { + Author { + name + _avg(books: {field: score}, articles: {field: rating}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_avg": float64(2.3333333333333335), + }, + { + "name": "John Grisham", + "_avg": float64(2.25), + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_multiple/with_count_filter_test.go b/tests/integration/query/one_to_many_multiple/with_count_filter_test.go index ea97e7c17d..9cb9409d41 100644 --- a/tests/integration/query/one_to_many_multiple/with_count_filter_test.go +++ b/tests/integration/query/one_to_many_multiple/with_count_filter_test.go @@ -17,165 +17,247 @@ import ( ) func 
TestQueryOneToManyMultipleWithCountOnMultipleJoinsWithAndWithoutFilter(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with counts with and without filters", - Request: `query { - Author { - name - _count(books: {filter: {score: {_gt: 3}}}, articles: {}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + 
"name": "Cornelia Funke", + "age": 62, + "verified": false, + }, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_count": 1, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, }, - { - "name": "Cornelia Funke", - "_count": 3, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + _count(books: {filter: {score: {_gt: 3}}}, articles: {}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_count": 3, + }, + { + "name": "John Grisham", + "_count": 1, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyMultipleWithCountOnMultipleJoinsWithFilters(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with counts with filters", - Request: `query { - Author { - name - 
_count(books: {filter: {score: {_gt: 3}}}, articles: {filter: {rating: {_lt: 3}}}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_count": 0, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + 
"author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, }, - { - "name": "Cornelia Funke", - "_count": 3, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + _count(books: {filter: {score: {_gt: 3}}}, articles: {filter: {rating: {_lt: 3}}}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_count": 3, + }, + { + "name": "John Grisham", + "_count": 0, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_multiple/with_count_test.go b/tests/integration/query/one_to_many_multiple/with_count_test.go index 1956f95abd..f6155ffc16 100644 --- a/tests/integration/query/one_to_many_multiple/with_count_test.go +++ b/tests/integration/query/one_to_many_multiple/with_count_test.go @@ -17,150 +17,242 @@ import ( ) func TestQueryOneToManyMultipleWithCount(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from 
many side with count", - Request: `query { - Author { - name - numberOfBooks: _count(books: {}) - numberOfArticles: _count(articles: {}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "numberOfBooks": 2, - "numberOfArticles": 1, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, }, - { - "name": "Cornelia 
Funke", - "numberOfBooks": 1, - "numberOfArticles": 2, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + numberOfBooks: _count(books: {}) + numberOfArticles: _count(articles: {}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "numberOfBooks": 1, + "numberOfArticles": 2, + }, + { + "name": "John Grisham", + "numberOfBooks": 2, + "numberOfArticles": 1, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyMultipleWithCountOnMultipleJoins(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with count", - Request: `query { - Author { - name - _count(books: {}, articles: {}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - 
//books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_count": 4, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, }, - { - "name": "Cornelia Funke", - "_count": 3, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + 
"author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + _count(books: {}, articles: {}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_count": 3, + }, + { + "name": "John Grisham", + "_count": 4, + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_multiple/with_multiple_filter_test.go b/tests/integration/query/one_to_many_multiple/with_multiple_filter_test.go index 26a15729ea..a30f2f78a1 100644 --- a/tests/integration/query/one_to_many_multiple/with_multiple_filter_test.go +++ b/tests/integration/query/one_to_many_multiple/with_multiple_filter_test.go @@ -17,77 +17,118 @@ import ( ) func TestQueryOneToManyMultipleWithMultipleManyFilters(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from one side with multiple many fitlers", - Request: `query { - Author(filter: {name: {_eq: "John Grisham"}, books: {score: {_eq: 1}}, articles: {rating: {_eq: 3}}}) { - name - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": 
"bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: 
map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, + }, + testUtils.Request{ + Request: `query { + Author(filter: {name: {_eq: "John Grisham"}, books: {score: {_eq: 1}}, articles: {rating: {_eq: 3}}}) { + name + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_multiple/with_sum_filter_test.go b/tests/integration/query/one_to_many_multiple/with_sum_filter_test.go index a14cb59926..f5f278a1f2 100644 --- a/tests/integration/query/one_to_many_multiple/with_sum_filter_test.go +++ b/tests/integration/query/one_to_many_multiple/with_sum_filter_test.go @@ -17,165 +17,247 @@ import ( ) func TestQueryOneToManyMultipleWithSumOnMultipleJoinsWithAndWithoutFilter(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sums with and without filters", - Request: `query { - Author { - name - _sum(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas 
Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, - }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_sum": int64(3), + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, 
+ }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, }, - { - "name": "Cornelia Funke", - "_sum": int64(7), + testUtils.Request{ + Request: `query { + Author { + name + _sum(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_sum": int64(7), + }, + { + "name": "John Grisham", + "_sum": int64(3), + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToManyMultipleWithSumOnMultipleJoinsWithFilters(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sums with filters", - Request: `query { - Author { - name - _sum(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating, filter: {rating: {_lt: 3}}}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, - }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_sum": int64(0), + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, 
+ }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, }, - { - "name": "Cornelia Funke", - "_sum": int64(7), + testUtils.Request{ + Request: `query { + Author { + name + _sum(books: {field: score, filter: {score: {_gt: 3}}}, articles: {field: rating, filter: {rating: {_lt: 3}}}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_sum": int64(7), + }, + { + "name": "John Grisham", + "_sum": int64(0), + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_multiple/with_sum_test.go b/tests/integration/query/one_to_many_multiple/with_sum_test.go index 81f96e93c4..a1f9176722 100644 --- a/tests/integration/query/one_to_many_multiple/with_sum_test.go +++ b/tests/integration/query/one_to_many_multiple/with_sum_test.go @@ -17,83 +17,124 @@ import ( ) func TestQueryOneToManyMultipleWithSumOnMultipleJoins(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-many relation query from many side with sum", - Request: `query { - Author { - name - _sum(books: {field: score}, articles: {field: rating}) - } - }`, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "rating": 3 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 2 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": 
"bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "rating": 1 - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Article { + name: String + author: Author + rating: Int + } + + type Book { + name: String + author: Author + score: Int + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + } + `, }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 1 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 2 - }`, - `{ - "name": "Sooley", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "score": 3 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "score": 4 - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + }, }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + }, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "_sum": int64(9), + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "After Guantánamo, Another Injustice", + "author_id": testUtils.NewDocIndex(2, 0), + "rating": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "To my dear readers", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": testUtils.NewDocIndex(2, 1), + "rating": 1, + }, + }, + testUtils.CreateDoc{ + 
CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 1, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 2, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "author_id": testUtils.NewDocIndex(2, 0), + "score": 3, + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif Lord", + "author_id": testUtils.NewDocIndex(2, 1), + "score": 4, + }, }, - { - "name": "Cornelia Funke", - "_sum": int64(7), + testUtils.Request{ + Request: `query { + Author { + name + _sum(books: {field: score}, articles: {field: rating}) + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "_sum": int64(7), + }, + { + "name": "John Grisham", + "_sum": int64(9), + }, + }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_many/joins_test.go b/tests/integration/query/one_to_many_to_many/joins_test.go index 2e040b05a7..d4d4da2ca5 100644 --- a/tests/integration/query/one_to_many_to_many/joins_test.go +++ b/tests/integration/query/one_to_many_to_many/joins_test.go @@ -17,205 +17,248 @@ import ( ) func TestOneToManyToManyJoinsAreLinkedProperly(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "1-N-M Query to ensure joins are linked properly.", - Request: `query { - Author { - _docID - name - book { - _docID - name - publisher { - _docID - name + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Author { + name: String + age: Int + verified: Boolean + book: [Book] } - } - } - }`, - Docs: map[int][]string{ - // Authors - 0: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3, Has written 5 books - `{ + type Book { + name: String + rating: Float + author: Author + publisher: [Publisher] + } 
+ + type Publisher { + name: String + address: String + yearOpened: Int + book: Book + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04, Has written 1 Book - `{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "Cornelia Funke", "age": 62, "verified": false }`, - // Has written no Book - `{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "Not a Writer", "age": 6, "verified": false }`, }, - - // Books - 1: { - // "bae-080d7580-a791-541e-90bd-49bf69f858e1", Has 1 Publisher - `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - // "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf", Has 1 Publisher - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - // "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca", Has no Publisher. - `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - // "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d", Has 1 Publisher - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - // "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a", Has 1 Publisher - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - // "bae-7ba73251-c935-5f44-ac04-d2061149cc14", Has 2 Publishers - `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(0, 1), + }, }, - - // Publishers - 2: { - `{ - "name": "Only Publisher of The Rooster Bar", - "address": "1 Rooster Ave., Waterloo, Ontario", + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Theif 
Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(0, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(0, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(0, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Only Publisher of The Rooster Bar", + "address": "1 Rooster Ave., Waterloo, Ontario", "yearOpened": 2022, - "book_id": "bae-080d7580-a791-541e-90bd-49bf69f858e1" - }`, - `{ - "name": "Only Publisher of Theif Lord", - "address": "1 Theif Lord, Waterloo, Ontario", + "book_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Only Publisher of Theif Lord", + "address": "1 Theif Lord, Waterloo, Ontario", "yearOpened": 2020, - "book_id": "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf" - }`, - `{ - "name": "Only Publisher of Painted House", - "address": "600 Madison Ave., New York, New York", + "book_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Only Publisher of Painted House", + "address": "600 Madison Ave., New York, New York", "yearOpened": 1995, - "book_id": "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" - }`, - `{ - "name": "Only Publisher of A Time for Mercy", - "address": "123 Andrew Street, Flin Flon, Manitoba", + "book_id": testUtils.NewDocIndex(1, 3), + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: 
map[string]any{ + "name": "Only Publisher of A Time for Mercy", + "address": "123 Andrew Street, Flin Flon, Manitoba", "yearOpened": 2013, - "book_id": "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a" - }`, - `{ - "name": "First of Two Publishers of Sooley", - "address": "11 Sooley Ave., Waterloo, Ontario", + "book_id": testUtils.NewDocIndex(1, 4), + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "First of Two Publishers of Sooley", + "address": "11 Sooley Ave., Waterloo, Ontario", "yearOpened": 1999, - "book_id": "bae-7ba73251-c935-5f44-ac04-d2061149cc14" - }`, - `{ - "name": "Second of Two Publishers of Sooley", - "address": "22 Sooley Ave., Waterloo, Ontario", + "book_id": testUtils.NewDocIndex(1, 5), + }, + }, + testUtils.CreateDoc{ + CollectionID: 2, + DocMap: map[string]any{ + "name": "Second of Two Publishers of Sooley", + "address": "22 Sooley Ave., Waterloo, Ontario", "yearOpened": 2000, - "book_id": "bae-7ba73251-c935-5f44-ac04-d2061149cc14" - }`, + "book_id": testUtils.NewDocIndex(1, 5), + }, }, - }, - - Results: []map[string]any{ - { - "name": "John Grisham", - "_docID": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "book": []map[string]any{ - + testUtils.Request{ + Request: `query { + Author { + _docID + name + book { + _docID + name + publisher { + _docID + name + } + } + } + }`, + Results: []map[string]any{ { - "_docID": "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca", - "name": "The Associate", - "publisher": []map[string]any{}, + "_docID": "bae-4819f8a1-b519-5b46-ae39-4fdda8558e4f", + "book": []map[string]any{}, + "name": "Not a Writer", }, - { - "_docID": "bae-7ba73251-c935-5f44-ac04-d2061149cc14", - "name": "Sooley", - "publisher": []map[string]any{ + "name": "Cornelia Funke", + "_docID": "bae-72e8c691-9f20-55e7-9228-8af1cf54cace", + "book": []map[string]any{ { - "_docID": "bae-cecb7841-fb4c-5403-a6d7-3654694dd073", - "name": "First of Two Publishers of Sooley", - }, - { - "_docID": 
"bae-d7e35ac3-dcf3-5537-91dd-3d27e378ba5d", - "name": "Second of Two Publishers of Sooley", + "_docID": "bae-4dbc2bbc-0652-5412-8063-486499f1c341", + "name": "The Rooster Bar", + "publisher": []map[string]any{ + { + "_docID": "bae-8a8cbab7-65db-5955-b618-b82f44761cee", + "name": "Only Publisher of The Rooster Bar", + }, + }, }, }, }, - { - "_docID": "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf", - "name": "Theif Lord", - "publisher": []map[string]any{ + "name": "John Grisham", + "_docID": "bae-e1ea288f-09fa-55fa-b0b5-0ac8941ea35b", + "book": []map[string]any{ { - "_docID": "bae-1a3ca715-3f3c-5934-9133-d7b489d57f88", - "name": "Only Publisher of Theif Lord", + "_docID": "bae-13164fd9-60fd-5c32-9cb5-8bff3ef8ea53", + "name": "Theif Lord", + "publisher": []map[string]any{ + { + "_docID": "bae-0107f5cc-c25a-5295-8439-2b08a286af83", + "name": "Only Publisher of Theif Lord", + }, + }, }, - }, - }, - - { - "_docID": "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d", - "name": "Painted House", - "publisher": []map[string]any{ { - "_docID": "bae-6412f5ff-a69a-5472-8647-18bf2b247697", - "name": "Only Publisher of Painted House", + "_docID": "bae-1ccf3043-d760-543e-be1b-6691fa6aa7a8", + "name": "The Associate", + "publisher": []map[string]any{}, }, - }, - }, - { - "_docID": "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a", - "name": "A Time for Mercy", - "publisher": []map[string]any{ { - "_docID": "bae-2f83fa75-241f-517d-9b47-3715feee43c1", - "name": "Only Publisher of A Time for Mercy", + "_docID": "bae-5366ba09-54e8-5381-8169-a770aa9282ae", + "name": "Painted House", + "publisher": []map[string]any{ + { + "_docID": "bae-35f1e55a-c51b-53d7-9b28-9beb904a1343", + "name": "Only Publisher of Painted House", + }, + }, + }, + { + "_docID": "bae-96c9de0f-2903-5589-9604-b42882afde8c", + "name": "A Time for Mercy", + "publisher": []map[string]any{ + { + "_docID": "bae-37451579-7e50-541d-8a3c-849b290ea416", + "name": "Only Publisher of A Time for Mercy", + }, + }, }, - }, - }, - }, - }, - - { - 
"_docID": "bae-7ba214a4-5ac8-5878-b221-dae6c285ef41", - "book": []map[string]any{}, - "name": "Not a Writer", - }, - - { - "name": "Cornelia Funke", - "_docID": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "book": []map[string]any{ - { - "_docID": "bae-080d7580-a791-541e-90bd-49bf69f858e1", - "name": "The Rooster Bar", - "publisher": []map[string]any{ { - "_docID": "bae-a5836991-96a3-5147-83be-3374a8b62e6c", - "name": "Only Publisher of The Rooster Bar", + "_docID": "bae-f52abfc3-9026-5713-9622-2d3458a386e0", + "name": "Sooley", + "publisher": []map[string]any{ + { + "_docID": "bae-c46b7771-843e-51ac-92be-d145aa2cfc07", + "name": "Second of Two Publishers of Sooley", + }, + { + "_docID": "bae-fc233f9c-f117-59de-be2b-60e4f6f0a898", + "name": "First of Two Publishers of Sooley", + }, + }, }, }, }, @@ -224,5 +267,5 @@ func TestOneToManyToManyJoinsAreLinkedProperly(t *testing.T) { }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_many/utils.go b/tests/integration/query/one_to_many_to_many/utils.go deleted file mode 100644 index 0dbc191c96..0000000000 --- a/tests/integration/query/one_to_many_to_many/utils.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package one_to_many_to_many - -import ( - "testing" - - testUtils "github.com/sourcenetwork/defradb/tests/integration" -) - -var gqlSchemaOneToManyToMany = (` - - type Author { - name: String - age: Int - verified: Boolean - book: [Book] - } - - type Book { - name: String - rating: Float - author: Author - publisher: [Publisher] - } - - type Publisher { - name: String - address: String - yearOpened: Int - book: Book - } - -`) - -func executeTestCase( - t *testing.T, - test testUtils.RequestTestCase, -) { - testUtils.ExecuteRequestTestCase( - t, - gqlSchemaOneToManyToMany, - []string{ - "Author", - "Book", - "Publisher", - }, - test, - ) -} diff --git a/tests/integration/query/one_to_many_to_one/fixture.go b/tests/integration/query/one_to_many_to_one/fixture.go index a078c630b2..ac66aa098d 100644 --- a/tests/integration/query/one_to_many_to_one/fixture.go +++ b/tests/integration/query/one_to_many_to_one/fixture.go @@ -47,7 +47,7 @@ func createDocsWith6BooksAnd5Publishers() []testUtils.CreateDoc { // Authors { CollectionID: 0, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3, Has written 5 books + // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84, Has written 5 books Doc: `{ "name": "John Grisham", "age": 65, @@ -76,102 +76,102 @@ func createDocsWith6BooksAnd5Publishers() []testUtils.CreateDoc { { CollectionID: 1, // "bae-080d7580-a791-541e-90bd-49bf69f858e1", Has 1 Publisher - Doc: `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(0, 1), + }, }, { CollectionID: 1, - // "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf", Has 1 Publisher - Doc: `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // "bae-7697f14d-7b32-5884-8677-344e183c14bf", Has 1 Publisher + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": 
testUtils.NewDocIndex(0, 0), + }, }, { CollectionID: 1, - // "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca", Has no Publisher. - Doc: `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // "bae-374998e0-e84d-5f6b-9e87-5edaaa2d9c7d", Has no Publisher. + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, { CollectionID: 1, - // "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d", Has 1 Publisher - Doc: `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // "bae-aef1d940-5ac1-5924-a87f-63ac40758b22", Has 1 Publisher + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, { CollectionID: 1, - // "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a", Has 1 Publisher - Doc: `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // "bae-1d0dcbed-300a-567a-9b48-c23cd026d165", Has 1 Publisher + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, { CollectionID: 1, - // "bae-7ba73251-c935-5f44-ac04-d2061149cc14", Has 1 Publisher - Doc: `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // "bae-ee6b8339-8a9e-58a9-9a0d-dbd8d44fa149", Has 1 Publisher + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, // Publishers { CollectionID: 2, - Doc: `{ - "name": "Only Publisher of The Rooster Bar", - "address": "1 Rooster Ave., Waterloo, Ontario", - "yearOpened": 2022, - "book_id": "bae-080d7580-a791-541e-90bd-49bf69f858e1" - }`, + DocMap: map[string]any{ + "name": "Only Publisher of The Rooster Bar", + "address": "1 Rooster Ave., Waterloo, Ontario", + "yearOpened": 2022, + "book_id": testUtils.NewDocIndex(1, 0), 
+ }, }, { CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Theif Lord", - "address": "1 Theif Lord, Waterloo, Ontario", - "yearOpened": 2020, - "book_id": "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf" - }`, + DocMap: map[string]any{ + "name": "Only Publisher of Theif Lord", + "address": "1 Theif Lord, Waterloo, Ontario", + "yearOpened": 2020, + "book_id": testUtils.NewDocIndex(1, 1), + }, }, { CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Painted House", - "address": "600 Madison Ave., New York, New York", - "yearOpened": 1995, - "book_id": "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" - }`, + DocMap: map[string]any{ + "name": "Only Publisher of Painted House", + "address": "600 Madison Ave., New York, New York", + "yearOpened": 1995, + "book_id": testUtils.NewDocIndex(1, 3), + }, }, { CollectionID: 2, - Doc: `{ - "name": "Only Publisher of A Time for Mercy", - "address": "123 Andrew Street, Flin Flon, Manitoba", - "yearOpened": 2013, - "book_id": "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a" - }`, + DocMap: map[string]any{ + "name": "Only Publisher of A Time for Mercy", + "address": "123 Andrew Street, Flin Flon, Manitoba", + "yearOpened": 2013, + "book_id": testUtils.NewDocIndex(1, 4), + }, }, { CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Sooley", - "address": "11 Sooley Ave., Waterloo, Ontario", - "yearOpened": 1999, - "book_id": "bae-7ba73251-c935-5f44-ac04-d2061149cc14" - }`, + DocMap: map[string]any{ + "name": "Only Publisher of Sooley", + "address": "11 Sooley Ave., Waterloo, Ontario", + "yearOpened": 1999, + "book_id": testUtils.NewDocIndex(1, 5), + }, }, } } diff --git a/tests/integration/query/one_to_many_to_one/joins_test.go b/tests/integration/query/one_to_many_to_one/joins_test.go index dbb6dad8da..9cd6432112 100644 --- a/tests/integration/query/one_to_many_to_one/joins_test.go +++ b/tests/integration/query/one_to_many_to_one/joins_test.go @@ -24,7 +24,7 @@ func TestOneToManyToOneJoinsAreLinkedProperly(t *testing.T) { // Authors 
testUtils.CreateDoc{ CollectionID: 0, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3, Has written 5 books + // Has written 5 books Doc: `{ "name": "John Grisham", "age": 65, @@ -33,7 +33,7 @@ func TestOneToManyToOneJoinsAreLinkedProperly(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04, Has written 1 Book + // Has written 1 Book Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -52,103 +52,103 @@ func TestOneToManyToOneJoinsAreLinkedProperly(t *testing.T) { // Books testUtils.CreateDoc{ CollectionID: 1, - // "bae-080d7580-a791-541e-90bd-49bf69f858e1", Has 1 Publisher - Doc: `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf", Has 1 Publisher - Doc: `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca", Has no Publisher. - Doc: `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // Has no Publisher. 
+ DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d", Has 1 Publisher - Doc: `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a", Has 1 Publisher - Doc: `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-7ba73251-c935-5f44-ac04-d2061149cc14", Has 1 Publisher - Doc: `{ - "name": "Sooley", - "rating": 3.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "Sooley", + "rating": 3.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, // Publishers testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of The Rooster Bar", - "address": "1 Rooster Ave., Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of The Rooster Bar", + "address": "1 Rooster Ave., Waterloo, Ontario", "yearOpened": 2022, - "book_id": "bae-080d7580-a791-541e-90bd-49bf69f858e1" - }`, + "book_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Theif Lord", - "address": "1 Theif Lord, Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of Theif Lord", + "address": "1 Theif Lord, Waterloo, Ontario", "yearOpened": 2020, - "book_id": "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf" - }`, + "book_id": testUtils.NewDocIndex(1, 1), + 
}, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Painted House", - "address": "600 Madison Ave., New York, New York", + DocMap: map[string]any{ + "name": "Only Publisher of Painted House", + "address": "600 Madison Ave., New York, New York", "yearOpened": 1995, - "book_id": "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" - }`, + "book_id": testUtils.NewDocIndex(1, 3), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of A Time for Mercy", - "address": "123 Andrew Street, Flin Flon, Manitoba", + DocMap: map[string]any{ + "name": "Only Publisher of A Time for Mercy", + "address": "123 Andrew Street, Flin Flon, Manitoba", "yearOpened": 2013, - "book_id": "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a" - }`, + "book_id": testUtils.NewDocIndex(1, 4), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Sooley", - "address": "11 Sooley Ave., Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of Sooley", + "address": "11 Sooley Ave., Waterloo, Ontario", "yearOpened": 1999, - "book_id": "bae-7ba73251-c935-5f44-ac04-d2061149cc14" - }`, + "book_id": testUtils.NewDocIndex(1, 5), + }, }, testUtils.Request{ Request: `query { @@ -166,63 +166,63 @@ func TestOneToManyToOneJoinsAreLinkedProperly(t *testing.T) { } }`, Results: []map[string]any{ + { + "_docID": "bae-489b4e01-4764-56f6-913f-b3c92dffcaa3", + "book": []map[string]any{}, + "name": "Not a Writer", + }, { "name": "John Grisham", - "_docID": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "_docID": "bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84", "book": []map[string]any{ { - "_docID": "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca", - "name": "The Associate", - "publisher": nil, - }, - { - "_docID": "bae-7ba73251-c935-5f44-ac04-d2061149cc14", - "name": "Sooley", + "_docID": "bae-1d0dcbed-300a-567a-9b48-c23cd026d165", + "name": "A Time for Mercy", "publisher": map[string]any{ - "_docID": 
"bae-cd2a319a-e013-559e-aad9-282b48fd3f72", - "name": "Only Publisher of Sooley", + "_docID": "bae-2bad7de3-0f1a-56c0-b499-a552debef4b8", + "name": "Only Publisher of A Time for Mercy", }, }, { - "_docID": "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf", + "_docID": "bae-374998e0-e84d-5f6b-9e87-5edaaa2d9c7d", + "name": "The Associate", + "publisher": nil, + }, + { + "_docID": "bae-7697f14d-7b32-5884-8677-344e183c14bf", "name": "Theif Lord", "publisher": map[string]any{ - "_docID": "bae-1a3ca715-3f3c-5934-9133-d7b489d57f88", + "_docID": "bae-d43823c0-0bb6-58a9-a098-1826dffa4e4a", "name": "Only Publisher of Theif Lord", }, }, { - "_docID": "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d", + "_docID": "bae-aef1d940-5ac1-5924-a87f-63ac40758b22", "name": "Painted House", "publisher": map[string]any{ - "_docID": "bae-6412f5ff-a69a-5472-8647-18bf2b247697", + "_docID": "bae-a104397b-7804-5cd0-93e5-c3986b4e5e71", "name": "Only Publisher of Painted House", }, }, { - "_docID": "bae-c674e3b0-ebb6-5b89-bfa3-d1128288d21a", - "name": "A Time for Mercy", + "_docID": "bae-ee6b8339-8a9e-58a9-9a0d-dbd8d44fa149", + "name": "Sooley", "publisher": map[string]any{ - "_docID": "bae-2f83fa75-241f-517d-9b47-3715feee43c1", - "name": "Only Publisher of A Time for Mercy", + "_docID": "bae-efeca601-cce1-5289-b392-85fa5b7bc0f7", + "name": "Only Publisher of Sooley", }, }, }, }, - { - "_docID": "bae-7ba214a4-5ac8-5878-b221-dae6c285ef41", - "book": []map[string]any{}, - "name": "Not a Writer", - }, { "name": "Cornelia Funke", - "_docID": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", + "_docID": "bae-fb2a1852-3951-5ce9-a3bf-6825202f201b", "book": []map[string]any{ { - "_docID": "bae-080d7580-a791-541e-90bd-49bf69f858e1", + "_docID": "bae-1867d7cb-01b3-572f-a993-1c3f22f46526", "name": "The Rooster Bar", "publisher": map[string]any{ - "_docID": "bae-a5836991-96a3-5147-83be-3374a8b62e6c", + "_docID": "bae-09af7e39-8596-584f-8825-cb430c4156b3", "name": "Only Publisher of The Rooster Bar", }, }, diff --git 
a/tests/integration/query/one_to_many_to_one/simple_test.go b/tests/integration/query/one_to_many_to_one/simple_test.go index 03bb0b781f..d1551300da 100644 --- a/tests/integration/query/one_to_many_to_one/simple_test.go +++ b/tests/integration/query/one_to_many_to_one/simple_test.go @@ -24,7 +24,7 @@ func TestQueryOneToOneRelations(t *testing.T) { // Authors testUtils.CreateDoc{ CollectionID: 0, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3, Has written 5 books + // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84, Has written 5 books Doc: `{ "name": "John Grisham", "age": 65, @@ -53,50 +53,49 @@ func TestQueryOneToOneRelations(t *testing.T) { testUtils.CreateDoc{ CollectionID: 1, // "bae-080d7580-a791-541e-90bd-49bf69f858e1", Has 1 Publisher - Doc: `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf", Has 1 Publisher - Doc: `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // "bae-7697f14d-7b32-5884-8677-344e183c14bf", Has 1 Publisher + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca", Has no Publisher. - Doc: `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // "bae-374998e0-e84d-5f6b-9e87-5edaaa2d9c7d", Has no Publisher. 
+ DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, // Publishers testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of The Rooster Bar", - "address": "1 Rooster Ave., Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of The Rooster Bar", + "address": "1 Rooster Ave., Waterloo, Ontario", "yearOpened": 2022, - "book_id": "bae-080d7580-a791-541e-90bd-49bf69f858e1" - }`, + "book_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Theif Lord", - "address": "1 Theif Lord, Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of Theif Lord", + "address": "1 Theif Lord, Waterloo, Ontario", "yearOpened": 2020, - "book_id": "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf" - }`, + "book_id": testUtils.NewDocIndex(1, 1), + }, }, - testUtils.Request{ Request: `query { Book { diff --git a/tests/integration/query/one_to_many_to_one/with_filter_test.go b/tests/integration/query/one_to_many_to_one/with_filter_test.go index 65c402dfa2..bd56f7bc39 100644 --- a/tests/integration/query/one_to_many_to_one/with_filter_test.go +++ b/tests/integration/query/one_to_many_to_one/with_filter_test.go @@ -24,7 +24,6 @@ func TestQueryComplexWithDeepFilterOnRenderedChildren(t *testing.T) { // Authors testUtils.CreateDoc{ CollectionID: 0, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3, Has written 5 books Doc: `{ "name": "John Grisham", "age": 65, @@ -33,7 +32,7 @@ func TestQueryComplexWithDeepFilterOnRenderedChildren(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04, Has written 1 Book + // Has written 1 Book Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -52,49 +51,49 @@ func TestQueryComplexWithDeepFilterOnRenderedChildren(t *testing.T) { // Books testUtils.CreateDoc{ CollectionID: 1, - // "bae-080d7580-a791-541e-90bd-49bf69f858e1", Has 1 Publisher - 
Doc: `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf", Has 1 Publisher - Doc: `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-4fb9e3e9-d1d3-5404-bf15-10e4c995d9ca", Has no Publisher. - Doc: `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + // Has no Publisher. + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, // Publishers testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of The Rooster Bar", - "address": "1 Rooster Ave., Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of The Rooster Bar", + "address": "1 Rooster Ave., Waterloo, Ontario", "yearOpened": 2022, - "book_id": "bae-080d7580-a791-541e-90bd-49bf69f858e1" - }`, + "book_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Theif Lord", - "address": "1 Theif Lord, Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of Theif Lord", + "address": "1 Theif Lord, Waterloo, Ontario", "yearOpened": 2020, - "book_id": "bae-b8091c4f-7594-5d7a-98e8-272aadcedfdf" - }`, + "book_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { @@ -141,6 +140,11 @@ func TestOneToManyToOneWithSumOfDeepFilterSubTypeOfBothDescAndAsc(t *testing.T) } }`, Results: []map[string]any{ + { + "name": "Not a Writer", + "s1": 0.0, 
+ "s2": 0.0, + }, { "name": "John Grisham", // 'Theif Lord' (4.8 rating) 2020, then 'A Time for Mercy' 2013 (4.5 rating). @@ -148,11 +152,6 @@ func TestOneToManyToOneWithSumOfDeepFilterSubTypeOfBothDescAndAsc(t *testing.T) // 'The Associate' as it has no Publisher (4.2 rating), then 'Painted House' 1995 (4.9 rating). "s2": 4.8, }, - { - "name": "Not a Writer", - "s1": 0.0, - "s2": 0.0, - }, { "name": "Cornelia Funke", "s1": 0.0, @@ -183,6 +182,11 @@ func TestOneToManyToOneWithSumOfDeepFilterSubTypeAndDeepOrderBySubtypeOppositeDi } }`, Results: []map[string]any{ + { + "name": "Not a Writer", + "s1": 0.0, + "books2020": []map[string]any{}, + }, { "name": "John Grisham", "s1": 4.5, @@ -192,11 +196,6 @@ func TestOneToManyToOneWithSumOfDeepFilterSubTypeAndDeepOrderBySubtypeOppositeDi }, }, }, - { - "name": "Not a Writer", - "s1": 0.0, - "books2020": []map[string]any{}, - }, { "name": "Cornelia Funke", "s1": 0.0, @@ -236,15 +235,15 @@ func TestOneToManyToOneWithTwoLevelDeepFilter(t *testing.T) { { "book": []map[string]any{ { - "name": "The Associate", - "publisher": nil, - }, - { - "name": "Sooley", + "name": "A Time for Mercy", "publisher": map[string]any{ - "yearOpened": int64(1999), + "yearOpened": int64(2013), }, }, + { + "name": "The Associate", + "publisher": nil, + }, { "name": "Theif Lord", "publisher": map[string]any{ @@ -258,9 +257,9 @@ func TestOneToManyToOneWithTwoLevelDeepFilter(t *testing.T) { }, }, { - "name": "A Time for Mercy", + "name": "Sooley", "publisher": map[string]any{ - "yearOpened": int64(2013), + "yearOpened": int64(1999), }, }, }, @@ -293,7 +292,6 @@ func TestOneToManyToOneWithCompoundOperatorInFilterAndRelation(t *testing.T) { createDocsWith6BooksAnd5Publishers(), testUtils.CreateDoc{ CollectionID: 0, - // bae-61d279c1-eab9-56ec-8654-dce0324ebfda Doc: `{ "name": "John Tolkien", "age": 70, @@ -302,21 +300,20 @@ func TestOneToManyToOneWithCompoundOperatorInFilterAndRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // 
bae-2c116b72-21f1-5c87-9148-f69f0c0c087e - Doc: `{ - "name": "The Lord of the Rings", - "rating": 5.0, - "author_id": "bae-61d279c1-eab9-56ec-8654-dce0324ebfda" - }`, + DocMap: map[string]any{ + "name": "The Lord of the Rings", + "rating": 5.0, + "author_id": testUtils.NewDocIndex(0, 3), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Allen & Unwin", - "address": "1 Allen Ave., Sydney, Australia", + DocMap: map[string]any{ + "name": "Allen & Unwin", + "address": "1 Allen Ave., Sydney, Australia", "yearOpened": 1954, - "book_id": "bae-2c116b72-21f1-5c87-9148-f69f0c0c087e" - }`, + "book_id": testUtils.NewDocIndex(1, 6), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/query/one_to_many_to_one/with_order_limit_test.go b/tests/integration/query/one_to_many_to_one/with_order_limit_test.go index fecfdc980d..1ec7c0296e 100644 --- a/tests/integration/query/one_to_many_to_one/with_order_limit_test.go +++ b/tests/integration/query/one_to_many_to_one/with_order_limit_test.go @@ -35,6 +35,11 @@ func TestOneToManyToOneDeepOrderBySubTypeOfBothDescAndAsc(t *testing.T) { } }`, Results: []map[string]any{ + { + "name": "Not a Writer", + "NewestPublishersBook": []map[string]any{}, + "OldestPublishersBook": []map[string]any{}, + }, { "name": "John Grisham", "NewestPublishersBook": []map[string]any{ @@ -48,11 +53,6 @@ func TestOneToManyToOneDeepOrderBySubTypeOfBothDescAndAsc(t *testing.T) { }, }, }, - { - "name": "Not a Writer", - "NewestPublishersBook": []map[string]any{}, - "OldestPublishersBook": []map[string]any{}, - }, { "name": "Cornelia Funke", "NewestPublishersBook": []map[string]any{ diff --git a/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go b/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go index bcb344c1e9..28379253b4 100644 --- a/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go +++ b/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go @@ 
-33,6 +33,11 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeDescDirec } }`, Results: []map[string]any{ + { + "name": "Not a Writer", + "s1": 0.0, + "NewestPublishersBook": []map[string]any{}, + }, { "name": "John Grisham", "s1": 4.8 + 4.5, // Because in descending order years for John are [2020, 2013]. @@ -45,11 +50,6 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeDescDirec }, }, }, - { - "name": "Not a Writer", - "s1": 0.0, - "NewestPublishersBook": []map[string]any{}, - }, { "name": "Cornelia Funke", "s1": 4.0, @@ -83,6 +83,11 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeAscDirect } }`, Results: []map[string]any{ + { + "name": "Not a Writer", + "s1": 0.0, + "NewestPublishersBook": []map[string]any{}, + }, { "name": "John Grisham", // Because in ascending order years for John are: @@ -97,11 +102,6 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeAscDirect }, }, }, - { - "name": "Not a Writer", - "s1": 0.0, - "NewestPublishersBook": []map[string]any{}, - }, { "name": "Cornelia Funke", "s1": 4.0, @@ -134,6 +134,11 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeOfBothDescAndAsc(t *testing.T) } }`, Results: []map[string]any{ + { + "name": "Not a Writer", + "s1": 0.0, + "s2": 0.0, + }, { "name": "John Grisham", // 'Theif Lord' (4.8 rating) 2020, then 'A Time for Mercy' 2013 (4.5 rating). @@ -141,11 +146,6 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeOfBothDescAndAsc(t *testing.T) // 'The Associate' as it has no Publisher (4.2 rating), then 'Painted House' 1995 (4.9 rating). 
"s2": float64(4.2) + float64(4.9), }, - { - "name": "Not a Writer", - "s1": 0.0, - "s2": 0.0, - }, { "name": "Cornelia Funke", "s1": 4.0, @@ -176,6 +176,11 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeOppositeD } }`, Results: []map[string]any{ + { + "name": "Not a Writer", + "s1": 0.0, + "OldestPublishersBook": []map[string]any{}, + }, { "name": "John Grisham", // 'Theif Lord' (4.8 rating) 2020, then 'A Time for Mercy' 2013 (4.5 rating). @@ -189,11 +194,6 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeOppositeD }, }, }, - { - "name": "Not a Writer", - "s1": 0.0, - "OldestPublishersBook": []map[string]any{}, - }, { "name": "Cornelia Funke", "s1": 4.0, diff --git a/tests/integration/query/one_to_many_to_one/with_sum_test.go b/tests/integration/query/one_to_many_to_one/with_sum_test.go index b1db62f07a..192784144e 100644 --- a/tests/integration/query/one_to_many_to_one/with_sum_test.go +++ b/tests/integration/query/one_to_many_to_one/with_sum_test.go @@ -24,7 +24,6 @@ func TestQueryWithSumOnInlineAndSumOnOneToManyField(t *testing.T) { // Authors testUtils.CreateDoc{ CollectionID: 0, - // bae-0c100ad0-1511-5f37-984d-66fa8534b06f, Has written 5 books Doc: `{ "name": "John Grisham", "age": 65, @@ -34,7 +33,7 @@ func TestQueryWithSumOnInlineAndSumOnOneToManyField(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04, Has written 1 Book + // Has written 1 Book Doc: `{ "name": "Cornelia Funke", "age": 62, @@ -44,49 +43,49 @@ func TestQueryWithSumOnInlineAndSumOnOneToManyField(t *testing.T) { // Books testUtils.CreateDoc{ CollectionID: 1, - // "bae-080d7580-a791-541e-90bd-49bf69f858e1", Has 1 Publisher - Doc: `{ - "name": "The Rooster Bar", - "rating": 4, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "The Rooster Bar", + "rating": 4, + "author_id": testUtils.NewDocIndex(0, 1), + }, }, 
testUtils.CreateDoc{ CollectionID: 1, - // "bae-afdd1769-b056-5bb1-b743-116a347b4b87", Has 1 Publisher - Doc: `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-0c100ad0-1511-5f37-984d-66fa8534b06f" - }`, + // Has 1 Publisher + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-fbba03cf-c77c-5850-a6a4-0d9992d489e1", Has no Publisher. - Doc: `{ - "name": "The Associate", - "rating": 4.2, - "author_id": "bae-0c100ad0-1511-5f37-984d-66fa8534b06f" - }`, + // Has no Publisher. + DocMap: map[string]any{ + "name": "The Associate", + "rating": 4.2, + "author_id": testUtils.NewDocIndex(0, 0), + }, }, // Publishers testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of The Rooster Bar", - "address": "1 Rooster Ave., Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of The Rooster Bar", + "address": "1 Rooster Ave., Waterloo, Ontario", "yearOpened": 2022, - "book_id": "bae-080d7580-a791-541e-90bd-49bf69f858e1" - }`, + "book_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Only Publisher of Theif Lord", - "address": "1 Theif Lord, Waterloo, Ontario", + DocMap: map[string]any{ + "name": "Only Publisher of Theif Lord", + "address": "1 Theif Lord, Waterloo, Ontario", "yearOpened": 2020, - "book_id": "bae-afdd1769-b056-5bb1-b743-116a347b4b87" - }`, + "book_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/query/one_to_one/simple_test.go b/tests/integration/query/one_to_one/simple_test.go index b5a1da594c..f0eb5f1f4e 100644 --- a/tests/integration/query/one_to_one/simple_test.go +++ b/tests/integration/query/one_to_one/simple_test.go @@ -32,19 +32,19 @@ func TestQueryOneToOne(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // 
bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, }, //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + 1: { // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84 `{ "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, }, @@ -73,19 +73,19 @@ func TestQueryOneToOne(t *testing.T) { }`, Docs: map[int][]string{ //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + 0: { // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, }, //authors - 1: { // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + 1: { // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84 `{ "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, }, @@ -108,119 +108,161 @@ func TestQueryOneToOne(t *testing.T) { } func TestQueryOneToOneWithMultipleRecords(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-one relation primary direction, multiple records", - Request: `query { - Book { - name - author { - name - } - } - }`, - Docs: map[int][]string{ - //books - 0: { - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9 - }`, - // "bae-ad4ad79c-278d-55cd-a9e3-85f3bc9a0947" - `{ - "name": "Go Guide for Rust developers", - "rating": 5.0 - }`, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: Book @primary + } + `, }, - //authors - 1: { - // "bae-3bfe0092-e31f-5ebe-a3ba-fa18fac448a6" - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, - // "bae-756c2bf0-4767-57fd-b12b-393915feae68", - 
`{ - "name": "Andrew Lone", - "age": 30, - "verified": true, - "published_id": "bae-ad4ad79c-278d-55cd-a9e3-85f3bc9a0947" - }`, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + }, }, - }, - Results: []map[string]any{ - { - "name": "Go Guide for Rust developers", - "author": map[string]any{ - "name": "Andrew Lone", + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Go Guide for Rust developers", + "rating": 5.0, }, }, - { - "name": "Painted House", - "author": map[string]any{ - "name": "John Grisham", + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, + }, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Andrew Lone", + "age": 30, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 1), + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Go Guide for Rust developers", + "author": map[string]any{ + "name": "Andrew Lone", + }, + }, + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneWithMultipleRecordsSecondaryDirection(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "One-to-one-to-one relation secondary direction", - Request: `query { - Author { - name - published { - name - } - } - }`, - Docs: map[int][]string{ - //books - 0: { - // "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - `{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: Book @primary + } + `, + }, + testUtils.CreateDoc{ + 
CollectionID: 0, + Doc: `{ "name": "Painted House" }`, - // "bae-c2f3f08b-53f2-5b53-9a9f-da1eee096321" - `{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "Theif Lord" }`, }, - //authors - 1: { - `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, - `{ - "name": "Cornelia Funke", - "published_id": "bae-c2f3f08b-53f2-5b53-9a9f-da1eee096321" - }`, + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "published": map[string]any{ - "name": "Painted House", + testUtils.CreateDoc{ + CollectionID: 1, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "published_id": testUtils.NewDocIndex(0, 1), }, }, - { - "name": "Cornelia Funke", - "published": map[string]any{ - "name": "Theif Lord", + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "published": map[string]any{ + "name": "Theif Lord", + }, + }, + { + "name": "John Grisham", + "published": map[string]any{ + "name": "Painted House", + }, + }, }, }, }, } - executeTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneWithNilChild(t *testing.T) { @@ -301,7 +343,6 @@ func TestQueryOneToOne_WithRelationIDFromPrimarySide(t *testing.T) { `, }, testUtils.CreateDoc{ - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 CollectionID: 0, Doc: `{ "name": "Painted House" @@ -309,10 +350,10 @@ func TestQueryOneToOne_WithRelationIDFromPrimarySide(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -324,7 +365,7 @@ func 
TestQueryOneToOne_WithRelationIDFromPrimarySide(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76", + "published_id": "bae-514f04b1-b218-5b8c-89ee-538f150a32b5", }, }, }, @@ -352,7 +393,6 @@ func TestQueryOneToOne_WithRelationIDFromSecondarySide(t *testing.T) { `, }, testUtils.CreateDoc{ - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 CollectionID: 0, Doc: `{ "name": "Painted House" @@ -360,10 +400,10 @@ func TestQueryOneToOne_WithRelationIDFromSecondarySide(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -375,7 +415,7 @@ func TestQueryOneToOne_WithRelationIDFromSecondarySide(t *testing.T) { Results: []map[string]any{ { "name": "Painted House", - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", }, }, }, diff --git a/tests/integration/query/one_to_one/with_clashing_id_field_test.go b/tests/integration/query/one_to_one/with_clashing_id_field_test.go index 1dd97572ca..368ea364a4 100644 --- a/tests/integration/query/one_to_one/with_clashing_id_field_test.go +++ b/tests/integration/query/one_to_one/with_clashing_id_field_test.go @@ -36,7 +36,6 @@ func TestQueryOneToOneWithClashingIdFieldOnSecondary(t *testing.T) { `, }, testUtils.CreateDoc{ - // bae-d82dbe47-9df1-5e33-bd87-f92e9c378161 CollectionID: 0, Doc: `{ "name": "Painted House", @@ -45,10 +44,10 @@ func TestQueryOneToOneWithClashingIdFieldOnSecondary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-d82dbe47-9df1-5e33-bd87-f92e9c378161" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, 
testUtils.Request{ Request: `query { @@ -63,7 +62,7 @@ func TestQueryOneToOneWithClashingIdFieldOnSecondary(t *testing.T) { Results: []map[string]any{ { "name": "Painted House", - "author_id": "bae-9d67a886-64e3-520b-8cd5-1ca7b098fabe", + "author_id": "bae-1a0405fa-e17d-5b0f-8fe2-eb966938df1c", "author": map[string]any{ "name": "John Grisham", }, diff --git a/tests/integration/query/one_to_one/with_count_filter_test.go b/tests/integration/query/one_to_one/with_count_filter_test.go index d5f2b60d22..9748bb7ca8 100644 --- a/tests/integration/query/one_to_one/with_count_filter_test.go +++ b/tests/integration/query/one_to_one/with_count_filter_test.go @@ -25,7 +25,6 @@ func TestQueryOneToOneWithCountWithCompoundOrFilterThatIncludesRelation(t *testi }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d Doc: `{ "name": "Painted House", "rating": 4.9 @@ -33,7 +32,6 @@ func TestQueryOneToOneWithCountWithCompoundOrFilterThatIncludesRelation(t *testi }, testUtils.CreateDoc{ CollectionID: 0, - // bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6 Doc: `{ "name": "Some Book", "rating": 4.0 @@ -41,7 +39,6 @@ func TestQueryOneToOneWithCountWithCompoundOrFilterThatIncludesRelation(t *testi }, testUtils.CreateDoc{ CollectionID: 0, - // bae-1c890922-ddf9-5820-a888-c7f977848934 Doc: `{ "name": "Some Other Book", "rating": 3.5 @@ -49,7 +46,6 @@ func TestQueryOneToOneWithCountWithCompoundOrFilterThatIncludesRelation(t *testi }, testUtils.CreateDoc{ CollectionID: 0, - // TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation Doc: `{ "name": "Yet Another Book", "rating": 3.0 @@ -57,40 +53,39 @@ func TestQueryOneToOneWithCountWithCompoundOrFilterThatIncludesRelation(t *testi }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - Doc: `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + 
"verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Some Writer", - "age": 45, - "verified": false, - "published_id": "bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6" - }`, + DocMap: map[string]any{ + "name": "Some Writer", + "age": 45, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Some Other Writer", - "age": 35, - "verified": false, - "published_id": "bae-1c890922-ddf9-5820-a888-c7f977848934" - }`, + DocMap: map[string]any{ + "name": "Some Other Writer", + "age": 35, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 2), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Yet Another Writer", - "age": 30, - "verified": false, - "published_id": "TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation" - }`, + DocMap: map[string]any{ + "name": "Yet Another Writer", + "age": 30, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 3), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/query/one_to_one/with_filter_order_test.go b/tests/integration/query/one_to_one/with_filter_order_test.go index 363302abc8..7ec7d88362 100644 --- a/tests/integration/query/one_to_one/with_filter_order_test.go +++ b/tests/integration/query/one_to_one/with_filter_order_test.go @@ -31,12 +31,12 @@ func TestOnetoOneSubTypeDscOrderByQueryWithFilterHavinghNoSubTypeSelections(t *t Docs: map[int][]string{ //books 0: { - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, - // bae-d432bdfb-787d-5a1c-ac29-dc025ab80095 + // bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b `{ "name": "Theif Lord", "rating": 4.8 @@ -49,14 +49,14 @@ func TestOnetoOneSubTypeDscOrderByQueryWithFilterHavinghNoSubTypeSelections(t *t "name": "John Grisham", "age": 65, "verified": true, - "published_id": 
"bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, // "bae-08519989-280d-5a4d-90b2-915ea06df3c4" `{ "name": "Cornelia Funke", "age": 62, "verified": false, - "published_id": "bae-d432bdfb-787d-5a1c-ac29-dc025ab80095" + "published_id": "bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b" }`, }, }, @@ -90,12 +90,12 @@ func TestOnetoOneSubTypeAscOrderByQueryWithFilterHavinghNoSubTypeSelections(t *t Docs: map[int][]string{ //books 0: { - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, - // bae-d432bdfb-787d-5a1c-ac29-dc025ab80095 + // bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b `{ "name": "Theif Lord", "rating": 4.8 @@ -108,14 +108,14 @@ func TestOnetoOneSubTypeAscOrderByQueryWithFilterHavinghNoSubTypeSelections(t *t "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, // "bae-08519989-280d-5a4d-90b2-915ea06df3c4" `{ "name": "Cornelia Funke", "age": 62, "verified": false, - "published_id": "bae-d432bdfb-787d-5a1c-ac29-dc025ab80095" + "published_id": "bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b" }`, }, }, diff --git a/tests/integration/query/one_to_one/with_filter_test.go b/tests/integration/query/one_to_one/with_filter_test.go index ab13634667..c9ae69f637 100644 --- a/tests/integration/query/one_to_one/with_filter_test.go +++ b/tests/integration/query/one_to_one/with_filter_test.go @@ -25,7 +25,6 @@ func TestQueryOneToOneWithNumericFilterOnParent(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d Doc: `{ "name": "Painted House", "rating": 4.9 @@ -33,13 +32,12 @@ func TestQueryOneToOneWithNumericFilterOnParent(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - Doc: `{ - "name": "John Grisham", - "age": 65, - 
"verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -78,7 +76,7 @@ func TestQueryOneToOneWithStringFilterOnChild(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9 @@ -86,12 +84,12 @@ func TestQueryOneToOneWithStringFilterOnChild(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84 Doc: `{ "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, testUtils.Request{ @@ -131,7 +129,7 @@ func TestQueryOneToOneWithBooleanFilterOnChild(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 Doc: `{ "name": "Painted House", "rating": 4.9 @@ -139,12 +137,12 @@ func TestQueryOneToOneWithBooleanFilterOnChild(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84 Doc: `{ "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, }, testUtils.Request{ @@ -184,7 +182,6 @@ func TestQueryOneToOneWithFilterThroughChildBackToParent(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d Doc: `{ "name": "Painted House", "rating": 4.9 @@ -192,7 +189,6 @@ func TestQueryOneToOneWithFilterThroughChildBackToParent(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - 
// bae-d432bdfb-787d-5a1c-ac29-dc025ab80095 Doc: `{ "name": "Theif Lord", "rating": 4.8 @@ -200,23 +196,21 @@ func TestQueryOneToOneWithFilterThroughChildBackToParent(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - Doc: `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - Doc: `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "published_id": "bae-d432bdfb-787d-5a1c-ac29-dc025ab80095" - }`, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "age": 62, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -255,7 +249,6 @@ func TestQueryOneToOneWithBooleanFilterOnChildWithNoSubTypeSelection(t *testing. }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d Doc: `{ "name": "Painted House", "rating": 4.9 @@ -263,13 +256,12 @@ func TestQueryOneToOneWithBooleanFilterOnChildWithNoSubTypeSelection(t *testing. 
}, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - Doc: `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -298,7 +290,6 @@ func TestQueryOneToOneWithCompoundAndFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d Doc: `{ "name": "Painted House", "rating": 4.9 @@ -306,7 +297,6 @@ func TestQueryOneToOneWithCompoundAndFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6 Doc: `{ "name": "Some Book", "rating": 4.0 @@ -314,7 +304,6 @@ func TestQueryOneToOneWithCompoundAndFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-500a9445-bd90-580e-9191-d2d0ec1a5cf5 Doc: `{ "name": "Some Other Book", "rating": 3.0 @@ -322,31 +311,30 @@ func TestQueryOneToOneWithCompoundAndFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - Doc: `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Some Writer", - "age": 45, - "verified": false, - "published_id": "bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6" - }`, + DocMap: map[string]any{ + "name": "Some Writer", + "age": 45, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Some Other Writer", - "age": 30, - "verified": true, - 
"published_id": "bae-500a9445-bd90-580e-9191-d2d0ec1a5cf5" - }`, + DocMap: map[string]any{ + "name": "Some Other Writer", + "age": 30, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 2), + }, }, testUtils.Request{ Request: `query { @@ -378,7 +366,6 @@ func TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d Doc: `{ "name": "Painted House", "rating": 4.9 @@ -386,7 +373,6 @@ func TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6 Doc: `{ "name": "Some Book", "rating": 4.0 @@ -394,7 +380,6 @@ func TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-1c890922-ddf9-5820-a888-c7f977848934 Doc: `{ "name": "Some Other Book", "rating": 3.5 @@ -402,7 +387,6 @@ func TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation Doc: `{ "name": "Yet Another Book", "rating": 3.0 @@ -410,40 +394,39 @@ func TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - Doc: `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "age": 65, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Some Writer", - "age": 45, - "verified": false, - "published_id": "bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6" - }`, + DocMap: map[string]any{ + "name": "Some Writer", + "age": 45, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ 
CollectionID: 1, - Doc: `{ - "name": "Some Other Writer", - "age": 35, - "verified": false, - "published_id": "bae-1c890922-ddf9-5820-a888-c7f977848934" - }`, + DocMap: map[string]any{ + "name": "Some Other Writer", + "age": 35, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 2), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Yet Another Writer", - "age": 30, - "verified": false, - "published_id": "TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation" - }`, + DocMap: map[string]any{ + "name": "Yet Another Writer", + "age": 30, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 3), + }, }, testUtils.Request{ Request: `query { @@ -462,10 +445,10 @@ func TestQueryOneToOneWithCompoundOrFilterThatIncludesRelation(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Some Other Book", + "name": "Some Book", }, { - "name": "Some Book", + "name": "Some Other Book", }, }, }, @@ -502,7 +485,6 @@ func TestQueryOneToOne_WithCompoundFiltersThatIncludesRelation_ShouldReturnResul }, testUtils.CreateDoc{ CollectionID: 0, - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d Doc: `{ "name": "Painted House", "rating": 4.9 @@ -510,7 +492,6 @@ func TestQueryOneToOne_WithCompoundFiltersThatIncludesRelation_ShouldReturnResul }, testUtils.CreateDoc{ CollectionID: 0, - // bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6 Doc: `{ "name": "Some Book", "rating": 4.0 @@ -518,7 +499,6 @@ func TestQueryOneToOne_WithCompoundFiltersThatIncludesRelation_ShouldReturnResul }, testUtils.CreateDoc{ CollectionID: 0, - // bae-66ba0c48-4984-5b44-83dd-edb791a54b7d Doc: `{ "name": "Some Other Book", "rating": 3.0 @@ -526,33 +506,30 @@ func TestQueryOneToOne_WithCompoundFiltersThatIncludesRelation_ShouldReturnResul }, testUtils.CreateDoc{ CollectionID: 1, - // bae-3bfe0092-e31f-5ebe-a3ba-fa18fac448a6 - Doc: `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" - }`, + DocMap: map[string]any{ + 
"name": "John Grisham", + "age": 65, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-5dac8488-0f75-5ddf-b08b-804b3d33a239 - Doc: `{ - "name": "Some Writer", - "age": 45, - "verified": false, - "published_id": "bae-437092f3-7817-555c-bf8a-cc1c5a0a0db6" - }`, + DocMap: map[string]any{ + "name": "Some Writer", + "age": 45, + "verified": false, + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-8b0c345b-dda7-573c-b5f1-5fa1d70593e1 - Doc: `{ - "name": "Some Other Writer", - "age": 30, - "verified": true, - "published_id": "bae-66ba0c48-4984-5b44-83dd-edb791a54b7d" - }`, + DocMap: map[string]any{ + "name": "Some Other Writer", + "age": 30, + "verified": true, + "published_id": testUtils.NewDocIndex(0, 2), + }, }, testUtils.Request{ Request: `query { @@ -565,14 +542,14 @@ func TestQueryOneToOne_WithCompoundFiltersThatIncludesRelation_ShouldReturnResul } }`, Results: []map[string]any{ - { - "name": "Some Other Book", - "rating": 3.0, - }, { "name": "Painted House", "rating": 4.9, }, + { + "name": "Some Other Book", + "rating": 3.0, + }, }, }, testUtils.Request{ @@ -604,14 +581,14 @@ func TestQueryOneToOne_WithCompoundFiltersThatIncludesRelation_ShouldReturnResul } }`, Results: []map[string]any{ - { - "name": "Some Other Book", - "rating": 3.0, - }, { "name": "Painted House", "rating": 4.9, }, + { + "name": "Some Other Book", + "rating": 3.0, + }, }, }, }, diff --git a/tests/integration/query/one_to_one/with_group_related_id_alias_test.go b/tests/integration/query/one_to_one/with_group_related_id_alias_test.go index eb53ab5d47..1f98f6604b 100644 --- a/tests/integration/query/one_to_one/with_group_related_id_alias_test.go +++ b/tests/integration/query/one_to_one/with_group_related_id_alias_test.go @@ -35,33 +35,29 @@ func TestQueryOneToOneWithGroupRelatedIDAlias(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // 
bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -77,7 +73,7 @@ func TestQueryOneToOneWithGroupRelatedIDAlias(t *testing.T) { }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-077b5e8d-5a86-5ae7-a321-ac7e423bb260", "author": map[string]any{ "name": "John Grisham", }, @@ -88,7 +84,7 @@ func TestQueryOneToOneWithGroupRelatedIDAlias(t *testing.T) { }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-b11e00fc-340f-558b-909d-2ab94601570b", "author": map[string]any{ "name": "Andrew Lone", }, @@ -125,33 +121,29 @@ func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithoutInnerGroup(t *t }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + 
"published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -161,10 +153,10 @@ func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithoutInnerGroup(t *t }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-3c308f94-dc9e-5262-b0ce-ef4e8e545820", }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", }, }, }, @@ -193,33 +185,29 @@ func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithoutInnerGroupWithJ }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -232,15 +220,15 @@ func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithoutInnerGroupWithJ }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": 
"bae-3c308f94-dc9e-5262-b0ce-ef4e8e545820", "author": map[string]any{ - "name": "John Grisham", + "name": "Andrew Lone", }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", "author": map[string]any{ - "name": "Andrew Lone", + "name": "John Grisham", }, }, }, @@ -270,33 +258,29 @@ func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithInnerGroup(t *test }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -309,18 +293,18 @@ func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithInnerGroup(t *test }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-3c308f94-dc9e-5262-b0ce-ef4e8e545820", "_group": []map[string]any{ { - "name": "Painted House", + "name": "Go Guide for Rust developers", }, }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", "_group": []map[string]any{ { - "name": "Go Guide for Rust developers", + "name": "Painted House", }, }, }, @@ -351,33 +335,29 @@ func 
TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithInnerGroupWithJoin }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -393,24 +373,24 @@ func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondaryWithInnerGroupWithJoin }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-3c308f94-dc9e-5262-b0ce-ef4e8e545820", "author": map[string]any{ - "name": "John Grisham", + "name": "Andrew Lone", }, "_group": []map[string]any{ { - "name": "Painted House", + "name": "Go Guide for Rust developers", }, }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", "author": map[string]any{ - "name": "Andrew Lone", + "name": "John Grisham", }, "_group": []map[string]any{ { - "name": "Go Guide for Rust developers", + "name": "Painted House", }, }, }, diff --git a/tests/integration/query/one_to_one/with_group_related_id_test.go b/tests/integration/query/one_to_one/with_group_related_id_test.go index 45b432192d..d3b9867886 100644 --- a/tests/integration/query/one_to_one/with_group_related_id_test.go +++ 
b/tests/integration/query/one_to_one/with_group_related_id_test.go @@ -35,33 +35,29 @@ func TestQueryOneToOneWithGroupRelatedID(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -74,7 +70,7 @@ func TestQueryOneToOneWithGroupRelatedID(t *testing.T) { }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-077b5e8d-5a86-5ae7-a321-ac7e423bb260", "_group": []map[string]any{ { "name": "Painted House", @@ -82,7 +78,7 @@ func TestQueryOneToOneWithGroupRelatedID(t *testing.T) { }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-cfee1ed9-ede8-5b80-a6fa-78c727a076ac", "_group": []map[string]any{ { "name": "Go Guide for Rust developers", @@ -116,33 +112,29 @@ func TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithoutGroup(t *testing.T) }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // 
bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -152,10 +144,10 @@ func TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithoutGroup(t *testing.T) }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-3c308f94-dc9e-5262-b0ce-ef4e8e545820", }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", }, }, }, @@ -184,33 +176,29 @@ func TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithoutGroupWithJoin(t *tes }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -223,15 +211,15 @@ func 
TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithoutGroupWithJoin(t *tes }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-3c308f94-dc9e-5262-b0ce-ef4e8e545820", "author": map[string]any{ - "name": "John Grisham", + "name": "Andrew Lone", }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", "author": map[string]any{ - "name": "Andrew Lone", + "name": "John Grisham", }, }, }, @@ -261,33 +249,29 @@ func TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithGroup(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -300,18 +284,18 @@ func TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithGroup(t *testing.T) { }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-bb4d6e89-e8b4-5eec-bfeb-6f7aa4840950", "_group": []map[string]any{ { - "name": "Painted House", + "name": "Go Guide for Rust developers", }, }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", "_group": 
[]map[string]any{ { - "name": "Go Guide for Rust developers", + "name": "Painted House", }, }, }, @@ -342,33 +326,29 @@ func TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithGroupWithJoin(t *testin }, testUtils.CreateDoc{ CollectionID: 0, - // bae-3d236f89-6a31-5add-a36a-27971a2eac76 Doc: `{ "name": "Painted House" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 Doc: `{ "name": "Go Guide for Rust developers" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 - Doc: `{ - "name": "John Grisham", - "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c - Doc: `{ - "name": "Andrew Lone", - "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" - }`, + DocMap: map[string]any{ + "name": "Andrew Lone", + "published_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.Request{ Request: `query { @@ -384,24 +364,24 @@ func TestQueryOneToOneWithGroupRelatedIDFromSecondaryWithGroupWithJoin(t *testin }`, Results: []map[string]any{ { - "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author_id": "bae-3c308f94-dc9e-5262-b0ce-ef4e8e545820", "author": map[string]any{ - "name": "John Grisham", + "name": "Andrew Lone", }, "_group": []map[string]any{ { - "name": "Painted House", + "name": "Go Guide for Rust developers", }, }, }, { - "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author_id": "bae-420e72a6-e0c6-5a06-a958-2cc7adb7b3d0", "author": map[string]any{ - "name": "Andrew Lone", + "name": "John Grisham", }, "_group": []map[string]any{ { - "name": "Go Guide for Rust developers", + "name": "Painted House", }, }, }, diff --git a/tests/integration/query/one_to_one/with_order_test.go b/tests/integration/query/one_to_one/with_order_test.go index eca937539a..bdf5d6d1e3 100644 
--- a/tests/integration/query/one_to_one/with_order_test.go +++ b/tests/integration/query/one_to_one/with_order_test.go @@ -32,12 +32,12 @@ func TestQueryOneToOneWithChildBooleanOrderDescending(t *testing.T) { Docs: map[int][]string{ //books 0: { - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, - // bae-d432bdfb-787d-5a1c-ac29-dc025ab80095 + // bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b `{ "name": "Theif Lord", "rating": 4.8 @@ -45,19 +45,19 @@ func TestQueryOneToOneWithChildBooleanOrderDescending(t *testing.T) { }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84 `{ "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 `{ "name": "Cornelia Funke", "age": 62, "verified": false, - "published_id": "bae-d432bdfb-787d-5a1c-ac29-dc025ab80095" + "published_id": "bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b" }`, }, }, @@ -100,12 +100,12 @@ func TestQueryOneToOneWithChildBooleanOrderAscending(t *testing.T) { Docs: map[int][]string{ //books 0: { - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, - // bae-d432bdfb-787d-5a1c-ac29-dc025ab80095 + // bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b `{ "name": "Theif Lord", "rating": 4.8 @@ -113,19 +113,19 @@ func TestQueryOneToOneWithChildBooleanOrderAscending(t *testing.T) { }, //authors 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + // bae-7aabc9d2-fbbc-5911-b0d0-b49a2a1d0e84 `{ "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 `{ "name": "Cornelia Funke", "age": 62, 
"verified": false, - "published_id": "bae-d432bdfb-787d-5a1c-ac29-dc025ab80095" + "published_id": "bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b" }`, }, }, @@ -164,12 +164,12 @@ func TestQueryOneToOneWithChildIntOrderDescendingWithNoSubTypeFieldsSelected(t * Docs: map[int][]string{ //books 0: { - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, - // bae-d432bdfb-787d-5a1c-ac29-dc025ab80095 + // bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b `{ "name": "Theif Lord", "rating": 4.8 @@ -182,14 +182,14 @@ func TestQueryOneToOneWithChildIntOrderDescendingWithNoSubTypeFieldsSelected(t * "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, // "bae-08519989-280d-5a4d-90b2-915ea06df3c4" `{ "name": "Cornelia Funke", "age": 62, "verified": false, - "published_id": "bae-d432bdfb-787d-5a1c-ac29-dc025ab80095" + "published_id": "bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b" }`, }, }, @@ -220,12 +220,12 @@ func TestQueryOneToOneWithChildIntOrderAscendingWithNoSubTypeFieldsSelected(t *t Docs: map[int][]string{ //books 0: { - // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + // bae-be6d8024-4953-5a92-84b4-f042d25230c6 `{ "name": "Painted House", "rating": 4.9 }`, - // bae-d432bdfb-787d-5a1c-ac29-dc025ab80095 + // bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b `{ "name": "Theif Lord", "rating": 4.8 @@ -238,14 +238,14 @@ func TestQueryOneToOneWithChildIntOrderAscendingWithNoSubTypeFieldsSelected(t *t "name": "John Grisham", "age": 65, "verified": true, - "published_id": "bae-fd541c25-229e-5280-b44b-e5c2af3e374d" + "published_id": "bae-be6d8024-4953-5a92-84b4-f042d25230c6" }`, // "bae-08519989-280d-5a4d-90b2-915ea06df3c4" `{ "name": "Cornelia Funke", "age": 62, "verified": false, - "published_id": "bae-d432bdfb-787d-5a1c-ac29-dc025ab80095" + "published_id": "bae-26a28d23-ae5b-5257-91b7-d4f2c6abef7b" 
}`, }, }, diff --git a/tests/integration/query/one_to_one_multiple/simple_test.go b/tests/integration/query/one_to_one_multiple/simple_test.go index cc5a97c117..f7988a1940 100644 --- a/tests/integration/query/one_to_one_multiple/simple_test.go +++ b/tests/integration/query/one_to_one_multiple/simple_test.go @@ -41,47 +41,43 @@ func TestQueryOneToOneMultiple_FromPrimary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed Doc: `{ "name": "John Grisham" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b6ea52b8-a5a5-5127-b9c0-5df4243457a3 Doc: `{ "name": "Cornelia Funke" }`, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d", - "author_id": "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5", - "author_id": "bae-b6ea52b8-a5a5-5127-b9c0-5df4243457a3" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + "author_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { @@ -147,47 +143,43 @@ func TestQueryOneToOneMultiple_FromMixedPrimaryAndSecondary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ 
CollectionID: 1, - // bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed Doc: `{ "name": "John Grisham" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-b6ea52b8-a5a5-5127-b9c0-5df4243457a3 Doc: `{ "name": "Cornelia Funke" }`, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d", - "author_id": "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5", - "author_id": "bae-b6ea52b8-a5a5-5127-b9c0-5df4243457a3" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + "author_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { @@ -203,21 +195,21 @@ func TestQueryOneToOneMultiple_FromMixedPrimaryAndSecondary(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Painted House", + "name": "Theif Lord", "publisher": map[string]any{ - "name": "Old Publisher", + "name": "New Publisher", }, "author": map[string]any{ - "name": "John Grisham", + "name": "Cornelia Funke", }, }, { - "name": "Theif Lord", + "name": "Painted House", "publisher": map[string]any{ - "name": "New Publisher", + "name": "Old Publisher", }, "author": map[string]any{ - "name": "Cornelia Funke", + "name": "John Grisham", }, }, }, @@ -253,47 +245,43 @@ func TestQueryOneToOneMultiple_FromSecondary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed Doc: `{ "name": "John Grisham" }`, 
}, testUtils.CreateDoc{ CollectionID: 1, - // bae-b6ea52b8-a5a5-5127-b9c0-5df4243457a3 Doc: `{ "name": "Cornelia Funke" }`, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d", - "author_id": "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + "author_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5", - "author_id": "bae-b6ea52b8-a5a5-5127-b9c0-5df4243457a3" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + "author_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/query/one_to_one_to_many/simple_test.go b/tests/integration/query/one_to_one_to_many/simple_test.go index 184d2eae1b..cee2d465cf 100644 --- a/tests/integration/query/one_to_one_to_many/simple_test.go +++ b/tests/integration/query/one_to_one_to_many/simple_test.go @@ -41,25 +41,23 @@ func TestQueryOneToOneToMany(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-5d900ac7-8bef-5565-9040-364c99601ae0 Doc: `{ "name": "Indicator1" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-14b75d37-f17d-58f0-89a8-43f2ec067122 - Doc: `{ - "name": "Observable1", - "indicator_id": "bae-5d900ac7-8bef-5565-9040-364c99601ae0" - }`, + DocMap: map[string]any{ + "name": "Observable1", + "indicator_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Observation1", - "observable_id": "bae-14b75d37-f17d-58f0-89a8-43f2ec067122" - }`, + DocMap: map[string]any{ + "name": "Observation1", + "observable_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { @@ -116,25 +114,23 @@ func 
TestQueryOneToOneToManyFromSecondaryOnOneToMany(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-5d900ac7-8bef-5565-9040-364c99601ae0 Doc: `{ "name": "Indicator1" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-14b75d37-f17d-58f0-89a8-43f2ec067122 - Doc: `{ - "name": "Observable1", - "indicator_id": "bae-5d900ac7-8bef-5565-9040-364c99601ae0" - }`, + DocMap: map[string]any{ + "name": "Observable1", + "indicator_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Observation1", - "observable_id": "bae-14b75d37-f17d-58f0-89a8-43f2ec067122" - }`, + DocMap: map[string]any{ + "name": "Observation1", + "observable_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { @@ -193,25 +189,23 @@ func TestQueryOneToOneToManyFromSecondaryOnOneToOne(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-5d900ac7-8bef-5565-9040-364c99601ae0 Doc: `{ "name": "Indicator1" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-14b75d37-f17d-58f0-89a8-43f2ec067122 - Doc: `{ - "name": "Observable1", - "indicator_id": "bae-5d900ac7-8bef-5565-9040-364c99601ae0" - }`, + DocMap: map[string]any{ + "name": "Observable1", + "indicator_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Observation1", - "observable_id": "bae-14b75d37-f17d-58f0-89a8-43f2ec067122" - }`, + DocMap: map[string]any{ + "name": "Observation1", + "observable_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { @@ -268,25 +262,23 @@ func TestQueryOneToOneToManyFromSecondary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // bae-5d900ac7-8bef-5565-9040-364c99601ae0 Doc: `{ "name": "Indicator1" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // bae-14b75d37-f17d-58f0-89a8-43f2ec067122 - Doc: `{ - "name": "Observable1", - "indicator_id": "bae-5d900ac7-8bef-5565-9040-364c99601ae0" - }`, + DocMap: map[string]any{ + "name": "Observable1", + 
"indicator_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Observation1", - "observable_id": "bae-14b75d37-f17d-58f0-89a8-43f2ec067122" - }`, + DocMap: map[string]any{ + "name": "Observation1", + "observable_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/query/one_to_one_to_one/simple_test.go b/tests/integration/query/one_to_one_to_one/simple_test.go index 11f75e38f9..e788ddc732 100644 --- a/tests/integration/query/one_to_one_to_one/simple_test.go +++ b/tests/integration/query/one_to_one_to_one/simple_test.go @@ -41,47 +41,43 @@ func TestQueryOneToOneToOne(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Cornelia Funke", - "published_id": "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - }`, + DocMap: map[string]any{ + "name": "Cornelia Funke", + 
"published_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { @@ -97,20 +93,20 @@ func TestQueryOneToOneToOne(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Old Publisher", + "name": "New Publisher", "printed": map[string]any{ - "name": "Painted House", + "name": "Theif Lord", "author": map[string]any{ - "name": "John Grisham", + "name": "Cornelia Funke", }, }, }, { - "name": "New Publisher", + "name": "Old Publisher", "printed": map[string]any{ - "name": "Theif Lord", + "name": "Painted House", "author": map[string]any{ - "name": "Cornelia Funke", + "name": "John Grisham", }, }, }, @@ -147,47 +143,43 @@ func TestQueryOneToOneToOneSecondaryThenPrimary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Cornelia Funke", - "published_id": "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - }`, + DocMap: map[string]any{ + "name": "Cornelia 
Funke", + "published_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { @@ -203,20 +195,20 @@ func TestQueryOneToOneToOneSecondaryThenPrimary(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Old Publisher", + "name": "New Publisher", "printed": map[string]any{ - "name": "Painted House", + "name": "Theif Lord", "author": map[string]any{ - "name": "John Grisham", + "name": "Cornelia Funke", }, }, }, { - "name": "New Publisher", + "name": "Old Publisher", "printed": map[string]any{ - "name": "Theif Lord", + "name": "Painted House", "author": map[string]any{ - "name": "Cornelia Funke", + "name": "John Grisham", }, }, }, @@ -253,47 +245,43 @@ func TestQueryOneToOneToOnePrimaryThenSecondary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Cornelia Funke", - "published_id": "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - }`, + DocMap: 
map[string]any{ + "name": "Cornelia Funke", + "published_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { @@ -309,20 +297,20 @@ func TestQueryOneToOneToOnePrimaryThenSecondary(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Old Publisher", + "name": "New Publisher", "printed": map[string]any{ - "name": "Painted House", + "name": "Theif Lord", "author": map[string]any{ - "name": "John Grisham", + "name": "Cornelia Funke", }, }, }, { - "name": "New Publisher", + "name": "Old Publisher", "printed": map[string]any{ - "name": "Theif Lord", + "name": "Painted House", "author": map[string]any{ - "name": "Cornelia Funke", + "name": "John Grisham", }, }, }, @@ -359,47 +347,43 @@ func TestQueryOneToOneToOneSecondary(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Cornelia Funke", - "published_id": 
"bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - }`, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "published_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { @@ -415,20 +399,20 @@ func TestQueryOneToOneToOneSecondary(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Old Publisher", + "name": "New Publisher", "printed": map[string]any{ - "name": "Painted House", + "name": "Theif Lord", "author": map[string]any{ - "name": "John Grisham", + "name": "Cornelia Funke", }, }, }, { - "name": "New Publisher", + "name": "Old Publisher", "printed": map[string]any{ - "name": "Theif Lord", + "name": "Painted House", "author": map[string]any{ - "name": "Cornelia Funke", + "name": "John Grisham", }, }, }, diff --git a/tests/integration/query/one_to_one_to_one/with_order_test.go b/tests/integration/query/one_to_one_to_one/with_order_test.go index c5da7a19e7..899f890f74 100644 --- a/tests/integration/query/one_to_one_to_one/with_order_test.go +++ b/tests/integration/query/one_to_one_to_one/with_order_test.go @@ -41,47 +41,43 @@ func TestQueryOneToOneToOneWithNestedOrder(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" Doc: `{ "name": "Old Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 0, - // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" Doc: `{ "name": "New Publisher" }`, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - Doc: `{ - "name": "Painted House", - "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" - }`, + DocMap: map[string]any{ + "name": "Painted House", + "publisher_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - // "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - Doc: `{ - "name": "Theif Lord", - "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" - }`, + DocMap: map[string]any{ + "name": "Theif Lord", + "publisher_id": testUtils.NewDocIndex(0, 1), + }, }, testUtils.CreateDoc{ 
CollectionID: 2, - Doc: `{ - "name": "John Grisham", - "published_id": "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" - }`, + DocMap: map[string]any{ + "name": "John Grisham", + "published_id": testUtils.NewDocIndex(1, 0), + }, }, testUtils.CreateDoc{ CollectionID: 2, - Doc: `{ - "name": "Cornelia Funke", - "published_id": "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" - }`, + DocMap: map[string]any{ + "name": "Cornelia Funke", + "published_id": testUtils.NewDocIndex(1, 1), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/query/one_to_two_many/simple_test.go b/tests/integration/query/one_to_two_many/simple_test.go index 6768c9e9b9..58a677d6fa 100644 --- a/tests/integration/query/one_to_two_many/simple_test.go +++ b/tests/integration/query/one_to_two_many/simple_test.go @@ -16,185 +16,403 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration" ) -func TestQueryOneToTwoManyWithNilUnnamedRelationship(t *testing.T) { - tests := []testUtils.RequestTestCase{ - { - Description: "One-to-many relation query from one side", - Request: `query { - Book { +func TestQueryOneToTwoManyWithNilUnnamedRelationship_FromOneSide(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-many relation query from one side", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author @relation(name: "written_books") + reviewedBy: Author @relation(name: "reviewed_books") + } + + type Author { + name: String + age: Int + verified: Boolean + written: [Book] @relation(name: "written_books") + reviewed: [Book] @relation(name: "reviewed_books") + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "Cornelia Funke", + "age": 62, + "verified": false + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", 
+ "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + "reviewedBy_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + rating + author { name - rating - author { - name - } - reviewedBy { - name - age - } } - }`, - Docs: map[int][]string{ - //books - 0: { - `{ - "name": "Painted House", + reviewedBy { + name + age + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "A Time for Mercy", + "author": map[string]any{ + "name": "John Grisham", + }, + "reviewedBy": map[string]any{ + "name": "Cornelia Funke", + "age": int64(62), + }, + }, + { + "name": "A Time for Mercy", "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Theif Lord", + "author": map[string]any{ + "name": "John Grisham", + }, + "reviewedBy": map[string]any{ + "name": "Cornelia Funke", + "age": int64(62), + }, + }, + { + "name": "Theif Lord", "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "reviewedBy_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + "author": map[string]any{ + "name": "Cornelia Funke", + }, + "reviewedBy": map[string]any{ + "name": "John Grisham", + "age": int64(65), + }, + }, }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - 
"verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryOneToTwoManyWithNilUnnamedRelationship_FromManySide(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-many relation query from many side", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author @relation(name: "written_books") + reviewedBy: Author @relation(name: "reviewed_books") + } + + type Author { + name: String + age: Int + verified: Boolean + written: [Book] @relation(name: "written_books") + reviewed: [Book] @relation(name: "reviewed_books") + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "Cornelia Funke", + "age": 62, + "verified": false + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), }, }, - Results: []map[string]any{ - { - "name": "Painted House", - "rating": 4.9, - "author": map[string]any{ - "name": "John Grisham", - }, - "reviewedBy": map[string]any{ - "name": "Cornelia Funke", - "age": int64(62), - }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), }, - { - "name": "Theif Lord", - "rating": 4.8, - "author": map[string]any{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + "reviewedBy_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + 
age + written { + name + } + reviewed { + name + rating + } + } + }`, + Results: []map[string]any{ + { "name": "Cornelia Funke", + "age": int64(62), + "reviewed": []map[string]any{ + { + "name": "Painted House", + "rating": 4.9, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + }, + }, + "written": []map[string]any{ + { + "name": "Theif Lord", + }, + }, }, - "reviewedBy": map[string]any{ + { "name": "John Grisham", "age": int64(65), - }, - }, - { - "name": "A Time for Mercy", - "rating": 4.5, - "author": map[string]any{ - "name": "John Grisham", - }, - "reviewedBy": map[string]any{ - "name": "Cornelia Funke", - "age": int64(62), + "reviewed": []map[string]any{ + { + "name": "Theif Lord", + "rating": 4.8, + }, + }, + "written": []map[string]any{ + { + "name": "Painted House", + }, + { + "name": "A Time for Mercy", + }, + }, }, }, }, }, - { - Description: "One-to-many relation query from many side", - Request: `query { - Author { - name - age - written { - name + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-many relation query from one side", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + price: Price + author: Author @relation(name: "written_books") + reviewedBy: Author @relation(name: "reviewed_books") } - reviewed { - name - rating + + type Author { + name: String + age: Int + verified: Boolean + written: [Book] @relation(name: "written_books") + reviewed: [Book] @relation(name: "reviewed_books") } - } - }`, - Docs: map[int][]string{ - //books - 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" + + type Price { + currency: String + value: Float + books: [Book] + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 2, + Doc: `{ + "currency": "GBP", + "value": 12.99 }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "reviewedBy_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }, + testUtils.CreateDoc{ + CollectionID: 2, + Doc: `{ + "currency": "SEK", + "value": 129 }`, - }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Cornelia Funke", "age": 62, "verified": false }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), + "price_id": testUtils.NewDocIndex(2, 0), }, }, - Results: []map[string]any{ - { - "name": "John Grisham", - "age": int64(65), - "reviewed": []map[string]any{ - { - "name": "Theif Lord", - "rating": 4.8, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), + "price_id": testUtils.NewDocIndex(2, 1), + }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + "reviewedBy_id": testUtils.NewDocIndex(1, 0), + "price_id": testUtils.NewDocIndex(2, 0), + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + rating + author { + name + } + reviewedBy { + name + age + } + price { + currency + value + } + } + }`, + 
Results: []map[string]any{ + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "name": "John Grisham", }, - }, - "written": []map[string]any{ - { - "name": "Painted House", + "reviewedBy": map[string]any{ + "name": "Cornelia Funke", + "age": int64(62), }, - { - "name": "A Time for Mercy", + "price": map[string]any{ + "currency": "GBP", + "value": 12.99, }, }, - }, - { - "name": "Cornelia Funke", - "age": int64(62), - "reviewed": []map[string]any{ - { - "name": "Painted House", - "rating": 4.9, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "name": "John Grisham", + }, + "reviewedBy": map[string]any{ + "name": "Cornelia Funke", + "age": int64(62), }, - { - "name": "A Time for Mercy", - "rating": 4.5, + "price": map[string]any{ + "currency": "SEK", + "value": float64(129), }, }, - "written": []map[string]any{ - { - "name": "Theif Lord", + { + "name": "Theif Lord", + "rating": 4.8, + "author": map[string]any{ + "name": "Cornelia Funke", + }, + "reviewedBy": map[string]any{ + "name": "John Grisham", + "age": int64(65), + }, + "price": map[string]any{ + "currency": "GBP", + "value": 12.99, }, }, }, @@ -202,247 +420,159 @@ func TestQueryOneToTwoManyWithNilUnnamedRelationship(t *testing.T) { }, } - for _, test := range tests { - executeTestCase(t, test) - } + testUtils.ExecuteTestCase(t, test) } -func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { - tests := []testUtils.RequestTestCase{ - { - Description: "One-to-many relation query from one side", - Request: `query { - Book { - name - rating - author { - name - } - reviewedBy { - name - age - } - price { - currency - value - } +func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships_FromManySide(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-many relation query from many side", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + price: Price + author: Author 
@relation(name: "written_books") + reviewedBy: Author @relation(name: "reviewed_books") } + + type Author { + name: String + age: Int + verified: Boolean + written: [Book] @relation(name: "written_books") + reviewed: [Book] @relation(name: "reviewed_books") + } + + type Price { + currency: String + value: Float + books: [Book] + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 2, + Doc: `{ + "currency": "GBP", + "value": 12.99 }`, - Docs: map[int][]string{ - //books - 0: { - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "price_id": "bae-fcc7a01d-6855-5e7a-abdd-261a46dcb9bd" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "price_id": "bae-b4b58dab-7bc3-5a3a-a26b-63d9d555116d" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "reviewedBy_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "price_id": "bae-fcc7a01d-6855-5e7a-abdd-261a46dcb9bd" + }, + testUtils.CreateDoc{ + CollectionID: 2, + Doc: `{ + "currency": "SEK", + "value": 129 }`, - }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "John Grisham", "age": 65, "verified": true }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ "name": "Cornelia Funke", "age": 62, "verified": false }`, - }, - 2: { - // bae-fcc7a01d-6855-5e7a-abdd-261a46dcb9bd - `{ - "currency": "GBP", - "value": 12.99 - }`, - // bae-b4b58dab-7bc3-5a3a-a26b-63d9d555116d - `{ - "currency": "SEK", - "value": 129 - }`, - }, }, - Results: []map[string]any{ - { - "name": "Theif Lord", - "rating": 4.8, - "author": map[string]any{ - "name": "Cornelia Funke", - }, - "reviewedBy": 
map[string]any{ - "name": "John Grisham", - "age": int64(65), - }, - "price": map[string]any{ - "currency": "GBP", - "value": 12.99, - }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), + "price_id": testUtils.NewDocIndex(2, 0), }, - { - "name": "A Time for Mercy", - "rating": 4.5, - "author": map[string]any{ - "name": "John Grisham", - }, - "reviewedBy": map[string]any{ - "name": "Cornelia Funke", - "age": int64(62), - }, - "price": map[string]any{ - "currency": "SEK", - "value": float64(129), - }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), + "price_id": testUtils.NewDocIndex(2, 1), }, - { - "name": "Painted House", - "rating": 4.9, - "author": map[string]any{ - "name": "John Grisham", - }, - "reviewedBy": map[string]any{ - "name": "Cornelia Funke", - "age": int64(62), - }, - "price": map[string]any{ - "currency": "GBP", - "value": 12.99, - }, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + "reviewedBy_id": testUtils.NewDocIndex(1, 0), + "price_id": testUtils.NewDocIndex(2, 0), }, }, - }, - { - Description: "One-to-many relation query from many side", - Request: `query { - Author { - name - age - written { + testUtils.Request{ + Request: `query { + Author { name - price { - value + age + written { + name + price { + value + } + } + reviewed { + name + rating } } - reviewed { - name - rating - } - } - }`, - Docs: map[int][]string{ - //books - 0: { - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "price_id": 
"bae-fcc7a01d-6855-5e7a-abdd-261a46dcb9bd" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "price_id": "bae-b4b58dab-7bc3-5a3a-a26b-63d9d555116d" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "reviewedBy_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "price_id": "bae-fcc7a01d-6855-5e7a-abdd-261a46dcb9bd" - }`, - }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ + }`, + Results: []map[string]any{ + { "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, - }, - 2: { - // bae-fcc7a01d-6855-5e7a-abdd-261a46dcb9bd - `{ - "currency": "GBP", - "value": 12.99 - }`, - // bae-b4b58dab-7bc3-5a3a-a26b-63d9d555116d - `{ - "currency": "SEK", - "value": 129 - }`, - }, - }, - Results: []map[string]any{ - { - "name": "John Grisham", - "age": int64(65), - "reviewed": []map[string]any{ - { - "name": "Theif Lord", - "rating": 4.8, - }, - }, - "written": []map[string]any{ - { - "name": "A Time for Mercy", - "price": map[string]any{ - "value": float64(129), + "age": int64(62), + "reviewed": []map[string]any{ + { + "name": "Painted House", + "rating": 4.9, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, }, }, - { - "name": "Painted House", - "price": map[string]any{ - "value": 12.99, + "written": []map[string]any{ + { + "name": "Theif Lord", + "price": map[string]any{ + "value": 12.99, + }, }, }, }, - }, - { - "name": "Cornelia Funke", - "age": int64(62), - "reviewed": []map[string]any{ - { - "name": "A Time for Mercy", - "rating": 4.5, - }, - { - "name": "Painted House", - "rating": 4.9, + { + "name": "John Grisham", + "age": int64(65), + "reviewed": []map[string]any{ + { + "name": "Theif Lord", + "rating": 4.8, + }, }, - }, - 
"written": []map[string]any{ - { - "name": "Theif Lord", - "price": map[string]any{ - "value": 12.99, + "written": []map[string]any{ + { + "name": "Painted House", + "price": map[string]any{ + "value": 12.99, + }, + }, + { + "name": "A Time for Mercy", + "price": map[string]any{ + "value": float64(129), + }, }, }, }, @@ -451,7 +581,5 @@ func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { }, } - for _, test := range tests { - executeTestCase(t, test) - } + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_two_many/utils.go b/tests/integration/query/one_to_two_many/utils.go deleted file mode 100644 index 363a10efe1..0000000000 --- a/tests/integration/query/one_to_two_many/utils.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package one_to_two_many - -import ( - "testing" - - testUtils "github.com/sourcenetwork/defradb/tests/integration" -) - -var bookAuthorGQLSchema = (` - type Book { - name: String - rating: Float - price: Price - author: Author @relation(name: "written_books") - reviewedBy: Author @relation(name: "reviewed_books") - } - - type Author { - name: String - age: Int - verified: Boolean - written: [Book] @relation(name: "written_books") - reviewed: [Book] @relation(name: "reviewed_books") - } - - type Price { - currency: String - value: Float - books: [Book] - } -`) - -func executeTestCase(t *testing.T, test testUtils.RequestTestCase) { - testUtils.ExecuteRequestTestCase(t, bookAuthorGQLSchema, []string{"Book", "Author", "Price"}, test) -} diff --git a/tests/integration/query/one_to_two_many/with_order_test.go b/tests/integration/query/one_to_two_many/with_order_test.go index 63f948dfaa..d84aa4e869 100644 --- a/tests/integration/query/one_to_two_many/with_order_test.go +++ b/tests/integration/query/one_to_two_many/with_order_test.go @@ -17,92 +17,118 @@ import ( ) func TestQueryOneToTwoManyWithOrder(t *testing.T) { - tests := []testUtils.RequestTestCase{ - { - Description: "One-to-many relation query from one side, order in opposite directions on children", - Request: `query { - Author { - name - written (order: {rating: ASC}) { - name - } - reviewed (order: {rating: DESC}){ - name - rating - } - } - }`, - Docs: map[int][]string{ - //books - 0: { - `{ - "name": "Painted House", - "rating": 4.9, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "A Time for Mercy", - "rating": 4.5, - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "reviewedBy_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "rating": 4.8, - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", - "reviewedBy_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, + 
test := testUtils.TestCase{ + Description: "One-to-many relation query from one side, order in opposite directions on children", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author @relation(name: "written_books") + reviewedBy: Author @relation(name: "reviewed_books") + } + + type Author { + name: String + age: Int + verified: Boolean + written: [Book] @relation(name: "written_books") + reviewed: [Book] @relation(name: "reviewed_books") + } + `, + }, + + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "Cornelia Funke", + "age": 62, + "verified": false + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Painted House", + "rating": 4.9, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 1), }, - //authors - 1: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": testUtils.NewDocIndex(1, 0), + "reviewedBy_id": testUtils.NewDocIndex(1, 0), }, }, - Results: []map[string]any{ - { - "name": "John Grisham", - "reviewed": []map[string]any{ - { - "name": "Theif Lord", - "rating": 4.8, - }, - { - "name": "A Time for Mercy", - "rating": 4.5, - }, - }, - "written": []map[string]any{ - { - "name": "A Time for Mercy", + testUtils.CreateDoc{ + CollectionID: 0, + DocMap: map[string]any{ + "name": "Theif Lord", + "rating": 4.8, + "author_id": testUtils.NewDocIndex(1, 1), + "reviewedBy_id": testUtils.NewDocIndex(1, 0), + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + written (order: 
{rating: ASC}) { + name + } + reviewed (order: {rating: DESC}){ + name + rating + } + } + }`, + Results: []map[string]any{ + { + "name": "Cornelia Funke", + "reviewed": []map[string]any{ + { + "name": "Painted House", + "rating": 4.9, + }, }, - { - "name": "Painted House", + "written": []map[string]any{ + { + "name": "Theif Lord", + }, }, }, - }, - { - "name": "Cornelia Funke", - "reviewed": []map[string]any{ - { - "name": "Painted House", - "rating": 4.9, + { + "name": "John Grisham", + "reviewed": []map[string]any{ + { + "name": "Theif Lord", + "rating": 4.8, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + }, }, - }, - "written": []map[string]any{ - { - "name": "Theif Lord", + "written": []map[string]any{ + { + "name": "A Time for Mercy", + }, + { + "name": "Painted House", + }, }, }, }, @@ -110,7 +136,5 @@ func TestQueryOneToTwoManyWithOrder(t *testing.T) { }, } - for _, test := range tests { - executeTestCase(t, test) - } + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/simple/simple_test.go b/tests/integration/query/simple/simple_test.go index abdc0cd1f3..5872998e81 100644 --- a/tests/integration/query/simple/simple_test.go +++ b/tests/integration/query/simple/simple_test.go @@ -36,7 +36,7 @@ func TestQuerySimple(t *testing.T) { }, Results: []map[string]any{ { - "_docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", + "_docID": "bae-d4303725-7db9-53d2-b324-f3ee44020e52", "Name": "John", "Age": int64(21), }, diff --git a/tests/integration/query/simple/with_cid_doc_id_test.go b/tests/integration/query/simple/with_cid_doc_id_test.go index 28bd453faf..97791ce993 100644 --- a/tests/integration/query/simple/with_cid_doc_id_test.go +++ b/tests/integration/query/simple/with_cid_doc_id_test.go @@ -93,8 +93,8 @@ func TestQuerySimpleWithCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", - docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + cid: 
"bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", + docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name } @@ -135,8 +135,8 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", - docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + cid: "bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", + docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name } @@ -177,8 +177,8 @@ func TestQuerySimpleWithUpdateAndLastCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreihyutx64yvxcfeglc2kax3l4kxrp5mae4p7txrxyiszqybs54h3zq", - docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + cid: "bafyreic3z3mjat7i7nm52jwprew7f7dimyob7uzgcuoypmdqekrhknnwba", + docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name } @@ -224,8 +224,8 @@ func TestQuerySimpleWithUpdateAndMiddleCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreihyutx64yvxcfeglc2kax3l4kxrp5mae4p7txrxyiszqybs54h3zq", - docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + cid: "bafyreic3z3mjat7i7nm52jwprew7f7dimyob7uzgcuoypmdqekrhknnwba", + docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name } @@ -266,8 +266,8 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocIDAndSchemaVersion(t *testing.T) testUtils.Request{ Request: `query { Users ( - cid: "bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu", - docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + cid: "bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", + docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name _version { @@ -324,8 +324,8 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreifgpewgsde3mpq5upokcngfcehdujokqultxlez3cnskhxfde3dw4", - docID: "bae-a688789e-d8a6-57a7-be09-22e005ab79e0" + cid: 
"bafyreicsx7flfz4b6iwfmwgrnrnd2klxrbg6yojuffh4ia3lrrqcph5q7a", + docID: "bae-d8cb53d4-ac5a-5c55-8306-64df633d400d" ) { name points @@ -376,8 +376,8 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreic7ds5alv2e7hxdbz3st5tj3vt4rvwydz6sap4wraudl3kzy6y27e", - docID: "bae-fa6a97e9-e0e9-5826-8a8c-57775d35e07c" + cid: "bafyreidwtowbnmdfshq3dptfdggzswtdftyh5374ohfcmqki4ad2wd4m64", + docID: "bae-d420ebcd-023a-5800-ae2e-8ea89442318e" ) { name points @@ -423,8 +423,8 @@ func TestCidAndDocIDQuery_ContainsPCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreih2vkwfws7vksed465gq736gi5nfxmajlxxemc64nkcendkddtqym", - docID: "bae-a688789e-d8a6-57a7-be09-22e005ab79e0" + cid: "bafyreifngcu76fxe3dtjee556hwymfjgsm3sqhxned4cykit5lcsyy3ope", + docID: "bae-d8cb53d4-ac5a-5c55-8306-64df633d400d" ) { name points @@ -470,8 +470,8 @@ func TestCidAndDocIDQuery_ContainsPCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreihafx27q435dg6mpbfvdsav47oeq65zkeie3eykjbvuhlwtk5ttwm", - docID: "bae-fa6a97e9-e0e9-5826-8a8c-57775d35e07c" + cid: "bafyreigih3wl4ycq5lktczydbecvcvlmdsy5jzarx2l6hcqdcrqkoranny", + docID: "bae-d420ebcd-023a-5800-ae2e-8ea89442318e" ) { name points diff --git a/tests/integration/query/simple/with_deleted_field_test.go b/tests/integration/query/simple/with_deleted_field_test.go index 182cce3280..cc302e29b7 100644 --- a/tests/integration/query/simple/with_deleted_field_test.go +++ b/tests/integration/query/simple/with_deleted_field_test.go @@ -52,11 +52,11 @@ func TestQuerySimple_WithDeletedField(t *testing.T) { Results: []map[string]any{ { "_deleted": true, - "name": "Andy", + "name": "John", }, { "_deleted": true, - "name": "John", + "name": "Andy", }, }, }, diff --git a/tests/integration/query/simple/with_doc_id_filter_test.go b/tests/integration/query/simple/with_doc_id_filter_test.go 
index 5477665e1c..7c1e886787 100644 --- a/tests/integration/query/simple/with_doc_id_filter_test.go +++ b/tests/integration/query/simple/with_doc_id_filter_test.go @@ -20,7 +20,7 @@ func TestQuerySimpleWithDocIDFilterBlock(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with basic filter (docID by filter block)", Request: `query { - Users(filter: {_docID: {_eq: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f"}}) { + Users(filter: {_docID: {_eq: "bae-d4303725-7db9-53d2-b324-f3ee44020e52"}}) { Name Age } diff --git a/tests/integration/query/simple/with_doc_id_test.go b/tests/integration/query/simple/with_doc_id_test.go index 6067baea38..7d5e7c0206 100644 --- a/tests/integration/query/simple/with_doc_id_test.go +++ b/tests/integration/query/simple/with_doc_id_test.go @@ -21,7 +21,7 @@ func TestQuerySimpleWithDocIDFilter(t *testing.T) { { Description: "Simple query with basic filter (by docID arg)", Request: `query { - Users(docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f") { + Users(docID: "bae-d4303725-7db9-53d2-b324-f3ee44020e52") { Name Age } @@ -62,7 +62,7 @@ func TestQuerySimpleWithDocIDFilter(t *testing.T) { { Description: "Simple query with basic filter (by docID arg), partial results", Request: `query { - Users(docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f") { + Users(docID: "bae-d4303725-7db9-53d2-b324-f3ee44020e52") { Name Age } diff --git a/tests/integration/query/simple/with_doc_ids_test.go b/tests/integration/query/simple/with_doc_ids_test.go index c28fb5d075..1dc61bb610 100644 --- a/tests/integration/query/simple/with_doc_ids_test.go +++ b/tests/integration/query/simple/with_doc_ids_test.go @@ -21,7 +21,7 @@ func TestQuerySimpleWithDocIDsFilter(t *testing.T) { { Description: "Simple query with basic filter (single ID by docIDs arg)", Request: `query { - Users(docIDs: ["bae-52b9170d-b77a-5887-b877-cbdbb99b009f"]) { + Users(docIDs: ["bae-d4303725-7db9-53d2-b324-f3ee44020e52"]) { Name Age } @@ -62,7 +62,7 @@ func 
TestQuerySimpleWithDocIDsFilter(t *testing.T) { { Description: "Simple query with basic filter (duplicate ID by docIDs arg), partial results", Request: `query { - Users(docIDs: ["bae-52b9170d-b77a-5887-b877-cbdbb99b009f", "bae-52b9170d-b77a-5887-b877-cbdbb99b009f"]) { + Users(docIDs: ["bae-d4303725-7db9-53d2-b324-f3ee44020e52", "bae-d4303725-7db9-53d2-b324-f3ee44020e52"]) { Name Age } @@ -89,7 +89,7 @@ func TestQuerySimpleWithDocIDsFilter(t *testing.T) { { Description: "Simple query with basic filter (multiple ID by docIDs arg), partial results", Request: `query { - Users(docIDs: ["bae-52b9170d-b77a-5887-b877-cbdbb99b009f", "bae-1378ab62-e064-5af4-9ea6-49941c8d8f94"]) { + Users(docIDs: ["bae-d4303725-7db9-53d2-b324-f3ee44020e52", "bae-428c6d76-3491-520b-ad1f-a218f4dad787"]) { Name Age } diff --git a/tests/integration/query/simple/with_filter/with_and_test.go b/tests/integration/query/simple/with_filter/with_and_test.go index 6ec50be686..bbcbeb38b9 100644 --- a/tests/integration/query/simple/with_filter/with_and_test.go +++ b/tests/integration/query/simple/with_filter/with_and_test.go @@ -46,14 +46,14 @@ func TestQuerySimpleWithIntGreaterThanAndIntLessThanFilter(t *testing.T) { }, }, Results: []map[string]any{ - { - "Name": "Bob", - "Age": int64(32), - }, { "Name": "John", "Age": int64(21), }, + { + "Name": "Bob", + "Age": int64(32), + }, }, } diff --git a/tests/integration/query/simple/with_filter/with_ge_float_test.go b/tests/integration/query/simple/with_filter/with_ge_float_test.go index f35726bfa2..3983f08718 100644 --- a/tests/integration/query/simple/with_filter/with_ge_float_test.go +++ b/tests/integration/query/simple/with_filter/with_ge_float_test.go @@ -127,10 +127,10 @@ func TestQuerySimpleWithHeightMGEFilterBlockWithNilValue(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Bob", + "Name": "John", }, { - "Name": "John", + "Name": "Bob", }, }, } diff --git a/tests/integration/query/simple/with_filter/with_gt_float_test.go 
b/tests/integration/query/simple/with_filter/with_gt_float_test.go index c0152b976e..fea3cc968a 100644 --- a/tests/integration/query/simple/with_filter/with_gt_float_test.go +++ b/tests/integration/query/simple/with_filter/with_gt_float_test.go @@ -85,10 +85,10 @@ func TestQuerySimpleWithFloatGreaterThanFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Bob", + "Name": "John", }, { - "Name": "John", + "Name": "Bob", }, }, }, diff --git a/tests/integration/query/simple/with_filter/with_gt_int_test.go b/tests/integration/query/simple/with_filter/with_gt_int_test.go index ecafd44ee4..d832114cac 100644 --- a/tests/integration/query/simple/with_filter/with_gt_int_test.go +++ b/tests/integration/query/simple/with_filter/with_gt_int_test.go @@ -88,14 +88,14 @@ func TestQuerySimpleWithIntGreaterThanFilterBlock(t *testing.T) { }, }, Results: []map[string]any{ - { - "Name": "Bob", - "Age": int64(32), - }, { "Name": "John", "Age": int64(21), }, + { + "Name": "Bob", + "Age": int64(32), + }, }, }, } diff --git a/tests/integration/query/simple/with_filter/with_in_test.go b/tests/integration/query/simple/with_filter/with_in_test.go index 7e2aa6df82..75867469d9 100644 --- a/tests/integration/query/simple/with_filter/with_in_test.go +++ b/tests/integration/query/simple/with_filter/with_in_test.go @@ -46,14 +46,14 @@ func TestQuerySimpleWithIntInFilter(t *testing.T) { }, }, Results: []map[string]any{ - { - "Name": "Alice", - "Age": int64(19), - }, { "Name": "Carlo", "Age": int64(55), }, + { + "Name": "Alice", + "Age": int64(19), + }, }, } @@ -90,10 +90,10 @@ func TestQuerySimpleWithIntInFilterOnFloat(t *testing.T) { }, Results: []map[string]any{ { - "Name": "John", + "Name": "Carlo", }, { - "Name": "Carlo", + "Name": "John", }, }, } @@ -138,14 +138,14 @@ func TestQuerySimpleWithIntInFilterWithNullValue(t *testing.T) { "Name": "Fred", "Age": nil, }, - { - "Name": "Alice", - "Age": int64(19), - }, { "Name": "Carlo", "Age": int64(55), }, + { + "Name": "Alice", + 
"Age": int64(19), + }, }, } diff --git a/tests/integration/query/simple/with_filter/with_like_string_test.go b/tests/integration/query/simple/with_filter/with_like_string_test.go index 95ebabc5de..ff6ddf1fa2 100644 --- a/tests/integration/query/simple/with_filter/with_like_string_test.go +++ b/tests/integration/query/simple/with_filter/with_like_string_test.go @@ -278,10 +278,10 @@ func TestQuerySimpleWithLikeStringContainsFilterBlockContainsStringMuplitpleResu }, Results: []map[string]any{ { - "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "Name": "Viserys I Targaryen, King of the Andals", }, { - "Name": "Viserys I Targaryen, King of the Andals", + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", }, }, } diff --git a/tests/integration/query/simple/with_filter/with_ne_bool_test.go b/tests/integration/query/simple/with_filter/with_ne_bool_test.go index b738cefc19..4b68d73592 100644 --- a/tests/integration/query/simple/with_filter/with_ne_bool_test.go +++ b/tests/integration/query/simple/with_filter/with_ne_bool_test.go @@ -77,10 +77,10 @@ func TestQuerySimpleWithBoolNotEqualsNilFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Fred", + "Name": "John", }, { - "Name": "John", + "Name": "Fred", }, }, } @@ -113,10 +113,10 @@ func TestQuerySimpleWithBoolNotEqualsFalseFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Bob", + "Name": "John", }, { - "Name": "John", + "Name": "Bob", }, }, } diff --git a/tests/integration/query/simple/with_filter/with_ne_datetime_test.go b/tests/integration/query/simple/with_filter/with_ne_datetime_test.go index 0179684732..11acb8629c 100644 --- a/tests/integration/query/simple/with_filter/with_ne_datetime_test.go +++ b/tests/integration/query/simple/with_filter/with_ne_datetime_test.go @@ -76,10 +76,10 @@ func TestQuerySimpleWithDateTimeNotEqualsNilFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Name": "John", + "Name": "Bob", }, { - 
"Name": "Bob", + "Name": "John", }, }, } diff --git a/tests/integration/query/simple/with_filter/with_ne_int_test.go b/tests/integration/query/simple/with_filter/with_ne_int_test.go index 92282990ee..7f61063eed 100644 --- a/tests/integration/query/simple/with_filter/with_ne_int_test.go +++ b/tests/integration/query/simple/with_filter/with_ne_int_test.go @@ -71,10 +71,10 @@ func TestQuerySimpleWithIntNotEqualsNilFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Bob", + "Name": "John", }, { - "Name": "John", + "Name": "Bob", }, }, } diff --git a/tests/integration/query/simple/with_filter/with_ne_string_test.go b/tests/integration/query/simple/with_filter/with_ne_string_test.go index 4142eac647..b052e89e89 100644 --- a/tests/integration/query/simple/with_filter/with_ne_string_test.go +++ b/tests/integration/query/simple/with_filter/with_ne_string_test.go @@ -71,10 +71,10 @@ func TestQuerySimpleWithStringNotEqualsNilFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Age": int64(32), + "Age": int64(21), }, { - "Age": int64(21), + "Age": int64(32), }, }, } diff --git a/tests/integration/query/simple/with_filter/with_nin_test.go b/tests/integration/query/simple/with_filter/with_nin_test.go index 77ecf57ddf..9993847c0e 100644 --- a/tests/integration/query/simple/with_filter/with_nin_test.go +++ b/tests/integration/query/simple/with_filter/with_nin_test.go @@ -49,10 +49,10 @@ func TestQuerySimpleWithNotInFilter(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Bob", + "Name": "John", }, { - "Name": "John", + "Name": "Bob", }, }, } diff --git a/tests/integration/query/simple/with_filter/with_nlike_string_test.go b/tests/integration/query/simple/with_filter/with_nlike_string_test.go index a7ca84163d..14526d39e2 100644 --- a/tests/integration/query/simple/with_filter/with_nlike_string_test.go +++ b/tests/integration/query/simple/with_filter/with_nlike_string_test.go @@ -364,10 +364,10 @@ func 
TestQuerySimpleWithNotLikeStringContainsFilterBlockHasEither(t *testing.T) }, Results: []map[string]any{ { - "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", + "Name": "Viserys I Targaryen, King of the Andals", }, { - "Name": "Viserys I Targaryen, King of the Andals", + "Name": "Daenerys Stormborn of House Targaryen, the First of Her Name", }, }, } diff --git a/tests/integration/query/simple/with_filter/with_not_test.go b/tests/integration/query/simple/with_filter/with_not_test.go index 2ce454a358..14fef0d155 100644 --- a/tests/integration/query/simple/with_filter/with_not_test.go +++ b/tests/integration/query/simple/with_filter/with_not_test.go @@ -46,6 +46,10 @@ func TestQuerySimple_WithNotEqualToXFilter_NoError(t *testing.T) { }, }, Results: []map[string]any{ + { + "Name": "John", + "Age": int64(21), + }, { "Name": "Bob", "Age": int64(32), @@ -54,10 +58,6 @@ func TestQuerySimple_WithNotEqualToXFilter_NoError(t *testing.T) { "Name": "Alice", "Age": int64(19), }, - { - "Name": "John", - "Age": int64(21), - }, }, } @@ -134,14 +134,14 @@ func TestQuerySimple_WithNotEqualToXorYFilter_NoError(t *testing.T) { }, }, Results: []map[string]any{ - { - "Name": "Bob", - "Age": int64(32), - }, { "Name": "John", "Age": int64(21), }, + { + "Name": "Bob", + "Age": int64(32), + }, }, } @@ -217,22 +217,22 @@ func TestQuerySimple_WithNotEqualToXAndNotYFilter_NoError(t *testing.T) { }, }, Results: []map[string]any{ - { - "Name": "Bob", - "Age": int64(32), - }, - { - "Name": "Alice", - "Age": int64(19), - }, { "Name": "John", "Age": int64(21), }, + { + "Name": "Bob", + "Age": int64(32), + }, { "Name": "Carlo", "Age": int64(55), }, + { + "Name": "Alice", + "Age": int64(19), + }, }, } diff --git a/tests/integration/query/simple/with_filter/with_or_test.go b/tests/integration/query/simple/with_filter/with_or_test.go index 1ff1f91ce6..2c1ea68e94 100644 --- a/tests/integration/query/simple/with_filter/with_or_test.go +++ 
b/tests/integration/query/simple/with_filter/with_or_test.go @@ -46,14 +46,14 @@ func TestQuerySimpleWithIntEqualToXOrYFilter(t *testing.T) { }, }, Results: []map[string]any{ - { - "Name": "Alice", - "Age": int64(19), - }, { "Name": "Carlo", "Age": int64(55), }, + { + "Name": "Alice", + "Age": int64(19), + }, }, } diff --git a/tests/integration/query/simple/with_group_average_filter_test.go b/tests/integration/query/simple/with_group_average_filter_test.go index fe33e9f4ef..fa68f2d0e9 100644 --- a/tests/integration/query/simple/with_group_average_filter_test.go +++ b/tests/integration/query/simple/with_group_average_filter_test.go @@ -42,14 +42,14 @@ func TestQuerySimpleWithGroupByStringWithoutRenderedGroupAndChildAverageWithFilt }, }, Results: []map[string]any{ - { - "Name": "Alice", - "_avg": float64(0), - }, { "Name": "John", "_avg": float64(33), }, + { + "Name": "Alice", + "_avg": float64(0), + }, }, } @@ -86,23 +86,23 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupAndChildAverageWithFilter( }, Results: []map[string]any{ { - "Name": "Alice", - "_avg": float64(0), + "Name": "John", + "_avg": float64(33), "_group": []map[string]any{ { - "Age": int64(19), + "Age": int64(34), + }, + { + "Age": int64(32), }, }, }, { - "Name": "John", - "_avg": float64(33), + "Name": "Alice", + "_avg": float64(0), "_group": []map[string]any{ { - "Age": int64(32), - }, - { - "Age": int64(34), + "Age": int64(19), }, }, }, @@ -200,11 +200,6 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupWithFilterAndChildAverageW }, }, Results: []map[string]any{ - { - "Name": "Alice", - "_avg": float64(0), - "_group": []map[string]any{}, - }, { "Name": "John", "_avg": float64(34), @@ -214,6 +209,11 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupWithFilterAndChildAverageW }, }, }, + { + "Name": "Alice", + "_avg": float64(0), + "_group": []map[string]any{}, + }, }, } @@ -302,20 +302,20 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupWithFilterAndChildAverageW }, 
Results: []map[string]any{ { - "Name": "Alice", - "_avg": float64(0), + "Name": "John", + "_avg": float64(34), "_group": []map[string]any{ { - "Age": int64(19), + "Age": int64(32), }, }, }, { - "Name": "John", - "_avg": float64(34), + "Name": "Alice", + "_avg": float64(0), "_group": []map[string]any{ { - "Age": int64(32), + "Age": int64(19), }, }, }, @@ -352,16 +352,16 @@ func TestQuerySimpleWithGroupByStringWithoutRenderedGroupAndChildAveragesWithDif }, }, Results: []map[string]any{ - { - "Name": "Alice", - "A1": float64(0), - "A2": float64(19), - }, { "Name": "John", "A1": float64(33), "A2": float64(0), }, + { + "Name": "Alice", + "A1": float64(0), + "A2": float64(19), + }, }, } @@ -402,14 +402,14 @@ func TestQuerySimpleWithGroupByStringWithoutRenderedGroupAndChildAverageWithFilt }, }, Results: []map[string]any{ - { - "Name": "Alice", - "_avg": float64(19), - }, { "Name": "John", "_avg": float64(31), }, + { + "Name": "Alice", + "_avg": float64(19), + }, }, } diff --git a/tests/integration/query/simple/with_group_average_sum_test.go b/tests/integration/query/simple/with_group_average_sum_test.go index 04cc852bea..4a1851df67 100644 --- a/tests/integration/query/simple/with_group_average_sum_test.go +++ b/tests/integration/query/simple/with_group_average_sum_test.go @@ -74,22 +74,22 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfCountOfInt(t * }, }, { - "Name": "Alice", - "_sum": float64(19), + "Name": "Carlo", + "_sum": float64(55), "_group": []map[string]any{ { - "Verified": false, - "_avg": float64(19), + "Verified": true, + "_avg": float64(55), }, }, }, { - "Name": "Carlo", - "_sum": float64(55), + "Name": "Alice", + "_sum": float64(19), "_group": []map[string]any{ { - "Verified": true, - "_avg": float64(55), + "Verified": false, + "_avg": float64(19), }, }, }, diff --git a/tests/integration/query/simple/with_group_average_test.go b/tests/integration/query/simple/with_group_average_test.go index cebf392c26..91aacd3c8a 100644 --- 
a/tests/integration/query/simple/with_group_average_test.go +++ b/tests/integration/query/simple/with_group_average_test.go @@ -121,14 +121,14 @@ func TestQuerySimpleWithGroupByStringWithoutRenderedGroupAndChildNilAverage(t *t }, }, Results: []map[string]any{ - { - "Name": "Alice", - "_avg": float64(19), - }, { "Name": "John", "_avg": float64(32), }, + { + "Name": "Alice", + "_avg": float64(19), + }, }, } @@ -193,22 +193,22 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfI }, }, { - "Name": "Alice", - "_avg": float64(19), + "Name": "Carlo", + "_avg": float64(55), "_group": []map[string]any{ { - "Verified": false, - "_avg": float64(19), + "Verified": true, + "_avg": float64(55), }, }, }, { - "Name": "Carlo", - "_avg": float64(55), + "Name": "Alice", + "_avg": float64(19), "_group": []map[string]any{ { - "Verified": true, - "_avg": float64(55), + "Verified": false, + "_avg": float64(19), }, }, }, @@ -341,26 +341,26 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfF }, Results: []map[string]any{ { - "Name": "John", - "_avg": float64(1.9675000000000002), + "Name": "Alice", + "_avg": float64(2.04), "_group": []map[string]any{ { "Verified": false, - "_avg": float64(2.22), - }, - { - "Verified": true, - "_avg": float64(1.715), + "_avg": float64(2.04), }, }, }, { - "Name": "Alice", - "_avg": float64(2.04), + "Name": "John", + "_avg": float64(1.9675000000000002), "_group": []map[string]any{ + { + "Verified": true, + "_avg": float64(1.715), + }, { "Verified": false, - "_avg": float64(2.04), + "_avg": float64(2.22), }, }, }, @@ -433,30 +433,16 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfA }, Results: []map[string]any{ { - "Name": "John", - "_avg": float64(1.9675000000000002), + "Name": "Carlo", + "_avg": float64(1.74), "_group": []map[string]any{ - { - "Verified": false, - "_avg": float64(2.22), - "_group": []map[string]any{ - { - "Age": int64(34), - "_avg": float64(2.22), 
- }, - }, - }, { "Verified": true, - "_avg": float64(1.715), + "_avg": float64(1.74), "_group": []map[string]any{ { - "Age": int64(32), - "_avg": float64(1.61), - }, - { - "Age": int64(25), - "_avg": float64(1.82), + "Age": int64(55), + "_avg": float64(1.74), }, }, }, @@ -479,16 +465,30 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfA }, }, { - "Name": "Carlo", - "_avg": float64(1.74), + "Name": "John", + "_avg": float64(1.9675000000000002), "_group": []map[string]any{ { "Verified": true, - "_avg": float64(1.74), + "_avg": float64(1.715), "_group": []map[string]any{ { - "Age": int64(55), - "_avg": float64(1.74), + "Age": int64(32), + "_avg": float64(1.61), + }, + { + "Age": int64(25), + "_avg": float64(1.82), + }, + }, + }, + { + "Verified": false, + "_avg": float64(2.22), + "_group": []map[string]any{ + { + "Age": int64(34), + "_avg": float64(2.22), }, }, }, diff --git a/tests/integration/query/simple/with_group_count_limit_offset_test.go b/tests/integration/query/simple/with_group_count_limit_offset_test.go index 45fe4bf90a..015f76c30c 100644 --- a/tests/integration/query/simple/with_group_count_limit_offset_test.go +++ b/tests/integration/query/simple/with_group_count_limit_offset_test.go @@ -97,7 +97,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithLimitAndChildCountWith "Name": "Bob", }, { - "Name": "John", + "Name": "Shahzad", }, }, }, diff --git a/tests/integration/query/simple/with_group_count_limit_test.go b/tests/integration/query/simple/with_group_count_limit_test.go index 9476c99bc9..1870e07bc2 100644 --- a/tests/integration/query/simple/with_group_count_limit_test.go +++ b/tests/integration/query/simple/with_group_count_limit_test.go @@ -97,7 +97,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithLimitAndChildCountWith "Name": "Bob", }, { - "Name": "John", + "Name": "Shahzad", }, }, }, diff --git a/tests/integration/query/simple/with_group_count_sum_test.go 
b/tests/integration/query/simple/with_group_count_sum_test.go index 21ac55a6dd..aab55c4002 100644 --- a/tests/integration/query/simple/with_group_count_sum_test.go +++ b/tests/integration/query/simple/with_group_count_sum_test.go @@ -74,21 +74,21 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfCount(t *testi }, }, { - "Name": "Alice", + "Name": "Carlo", "_sum": int64(1), "_group": []map[string]any{ { - "Verified": false, + "Verified": true, "_count": int(1), }, }, }, { - "Name": "Carlo", + "Name": "Alice", "_sum": int64(1), "_group": []map[string]any{ { - "Verified": true, + "Verified": false, "_count": int(1), }, }, diff --git a/tests/integration/query/simple/with_group_doc_id_test.go b/tests/integration/query/simple/with_group_doc_id_test.go index 177934ebdc..f476b3c617 100644 --- a/tests/integration/query/simple/with_group_doc_id_test.go +++ b/tests/integration/query/simple/with_group_doc_id_test.go @@ -22,14 +22,14 @@ func TestQuerySimpleWithGroupByWithGroupWithDocID(t *testing.T) { Request: `query { Users(groupBy: [Age]) { Age - _group(docID: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f") { + _group(docID: "bae-d4303725-7db9-53d2-b324-f3ee44020e52") { Name } } }`, Docs: map[int][]string{ 0: { - // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + // bae-d4303725-7db9-53d2-b324-f3ee44020e52 `{ "Name": "John", "Age": 21 @@ -45,10 +45,6 @@ func TestQuerySimpleWithGroupByWithGroupWithDocID(t *testing.T) { }, }, Results: []map[string]any{ - { - "Age": int64(32), - "_group": []map[string]any{}, - }, { "Age": int64(21), "_group": []map[string]any{ @@ -57,6 +53,10 @@ func TestQuerySimpleWithGroupByWithGroupWithDocID(t *testing.T) { }, }, }, + { + "Age": int64(32), + "_group": []map[string]any{}, + }, }, } diff --git a/tests/integration/query/simple/with_group_doc_ids_test.go b/tests/integration/query/simple/with_group_doc_ids_test.go index 9db3bae934..72aa527b66 100644 --- a/tests/integration/query/simple/with_group_doc_ids_test.go +++ 
b/tests/integration/query/simple/with_group_doc_ids_test.go @@ -22,14 +22,14 @@ func TestQuerySimpleWithGroupByWithGroupWithDocIDs(t *testing.T) { Request: `query { Users(groupBy: [Age]) { Age - _group(docIDs: ["bae-52b9170d-b77a-5887-b877-cbdbb99b009f", "bae-9b2e1434-9d61-5eb1-b3b9-82e8e40729a7"]) { + _group(docIDs: ["bae-d4303725-7db9-53d2-b324-f3ee44020e52", "bae-19b16890-5f24-5e5b-8822-ed2a97ebcc24"]) { Name } } }`, Docs: map[int][]string{ 0: { - // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + // bae-d4303725-7db9-53d2-b324-f3ee44020e52 `{ "Name": "John", "Age": 21 @@ -38,7 +38,7 @@ func TestQuerySimpleWithGroupByWithGroupWithDocIDs(t *testing.T) { "Name": "Bob", "Age": 32 }`, - // bae-9b2e1434-9d61-5eb1-b3b9-82e8e40729a7 + // bae-19b16890-5f24-5e5b-8822-ed2a97ebcc24 `{ "Name": "Fred", "Age": 21 @@ -50,21 +50,21 @@ func TestQuerySimpleWithGroupByWithGroupWithDocIDs(t *testing.T) { }, }, Results: []map[string]any{ - { - "Age": int64(32), - "_group": []map[string]any{}, - }, { "Age": int64(21), "_group": []map[string]any{ { - "Name": "John", + "Name": "Fred", }, { - "Name": "Fred", + "Name": "John", }, }, }, + { + "Age": int64(32), + "_group": []map[string]any{}, + }, }, } diff --git a/tests/integration/query/simple/with_group_filter_test.go b/tests/integration/query/simple/with_group_filter_test.go index 36e09fa69f..919d4c311b 100644 --- a/tests/integration/query/simple/with_group_filter_test.go +++ b/tests/integration/query/simple/with_group_filter_test.go @@ -49,25 +49,25 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberFilter(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Alice", - "_group": []map[string]any{}, - }, - { - "Name": "John", + "Name": "Carlo", "_group": []map[string]any{ { - "Age": int64(32), + "Age": int64(55), }, }, }, { - "Name": "Carlo", + "Name": "John", "_group": []map[string]any{ { - "Age": int64(55), + "Age": int64(32), }, }, }, + { + "Name": "Alice", + "_group": []map[string]any{}, + }, }, } @@ -107,18 +107,18 @@ func 
TestQuerySimpleWithGroupByStringWithGroupNumberWithParentFilter(t *testing. }, Results: []map[string]any{ { - "Name": "John", + "Name": "Carlo", "_group": []map[string]any{ { - "Age": int64(32), + "Age": int64(55), }, }, }, { - "Name": "Carlo", + "Name": "John", "_group": []map[string]any{ { - "Age": int64(55), + "Age": int64(32), }, }, }, @@ -158,10 +158,10 @@ func TestQuerySimpleWithGroupByStringWithUnrenderedGroupNumberWithParentFilter(t }, Results: []map[string]any{ { - "Name": "John", + "Name": "Carlo", }, { - "Name": "Carlo", + "Name": "John", }, }, } @@ -229,19 +229,19 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanThenInnerNumberFilterT }, }, { - "Name": "Alice", + "Name": "Carlo", "_group": []map[string]any{ { - "Verified": false, + "Verified": true, "_group": []map[string]any{}, }, }, }, { - "Name": "Carlo", + "Name": "Alice", "_group": []map[string]any{ { - "Verified": true, + "Verified": false, "_group": []map[string]any{}, }, }, @@ -288,13 +288,13 @@ func TestQuerySimpleWithGroupByStringWithMultipleGroupNumberFilter(t *testing.T) }, Results: []map[string]any{ { - "Name": "Alice", - "G1": []map[string]any{}, - "G2": []map[string]any{ + "Name": "Carlo", + "G1": []map[string]any{ { - "Age": int64(19), + "Age": int64(55), }, }, + "G2": []map[string]any{}, }, { "Name": "John", @@ -310,13 +310,13 @@ func TestQuerySimpleWithGroupByStringWithMultipleGroupNumberFilter(t *testing.T) }, }, { - "Name": "Carlo", - "G1": []map[string]any{ + "Name": "Alice", + "G1": []map[string]any{}, + "G2": []map[string]any{ { - "Age": int64(55), + "Age": int64(19), }, }, - "G2": []map[string]any{}, }, }, } diff --git a/tests/integration/query/simple/with_group_order_test.go b/tests/integration/query/simple/with_group_order_test.go index ffe52ca7c1..29d7014b03 100644 --- a/tests/integration/query/simple/with_group_order_test.go +++ b/tests/integration/query/simple/with_group_order_test.go @@ -48,6 +48,14 @@ func 
TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrder(t *testing.T) }, }, Results: []map[string]any{ + { + "Name": "Carlo", + "_group": []map[string]any{ + { + "Age": int64(55), + }, + }, + }, { "Name": "Alice", "_group": []map[string]any{ @@ -67,14 +75,6 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrder(t *testing.T) }, }, }, - { - "Name": "Carlo", - "_group": []map[string]any{ - { - "Age": int64(55), - }, - }, - }, }, } @@ -113,14 +113,6 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrderDescending(t * }, }, Results: []map[string]any{ - { - "Name": "Alice", - "_group": []map[string]any{ - { - "Age": int64(19), - }, - }, - }, { "Name": "Carlo", "_group": []map[string]any{ @@ -140,6 +132,14 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrderDescending(t * }, }, }, + { + "Name": "Alice", + "_group": []map[string]any{ + { + "Age": int64(19), + }, + }, + }, }, } diff --git a/tests/integration/query/simple/with_group_sum_test.go b/tests/integration/query/simple/with_group_sum_test.go index 9391ef7d5f..8fd0ce6fb1 100644 --- a/tests/integration/query/simple/with_group_sum_test.go +++ b/tests/integration/query/simple/with_group_sum_test.go @@ -121,14 +121,14 @@ func TestQuerySimpleWithGroupByStringWithoutRenderedGroupAndChildNilSum(t *testi }, }, Results: []map[string]any{ - { - "Name": "Alice", - "_sum": int64(19), - }, { "Name": "John", "_sum": int64(32), }, + { + "Name": "Alice", + "_sum": int64(19), + }, }, } @@ -193,22 +193,22 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfInt(t *te }, }, { - "Name": "Alice", - "_sum": int64(19), + "Name": "Carlo", + "_sum": int64(55), "_group": []map[string]any{ { - "Verified": false, - "_sum": int64(19), + "Verified": true, + "_sum": int64(55), }, }, }, { - "Name": "Carlo", - "_sum": int64(55), + "Name": "Alice", + "_sum": int64(19), "_group": []map[string]any{ { - "Verified": true, - "_sum": int64(55), + "Verified": false, + "_sum": 
int64(19), }, }, }, @@ -341,26 +341,26 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfFloat(t * }, Results: []map[string]any{ { - "Name": "John", - "_sum": float64(5.65), + "Name": "Alice", + "_sum": float64(2.04), "_group": []map[string]any{ { "Verified": false, - "_sum": float64(2.22), - }, - { - "Verified": true, - "_sum": float64(3.43), + "_sum": float64(2.04), }, }, }, { - "Name": "Alice", - "_sum": float64(2.04), + "Name": "John", + "_sum": float64(5.65), "_group": []map[string]any{ + { + "Verified": true, + "_sum": float64(3.43), + }, { "Verified": false, - "_sum": float64(2.04), + "_sum": float64(2.22), }, }, }, @@ -433,30 +433,16 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfSumOfFloa }, Results: []map[string]any{ { - "Name": "John", - "_sum": float64(5.65), + "Name": "Carlo", + "_sum": float64(1.74), "_group": []map[string]any{ - { - "Verified": false, - "_sum": float64(2.22), - "_group": []map[string]any{ - { - "Age": int64(34), - "_sum": float64(2.22), - }, - }, - }, { "Verified": true, - "_sum": float64(3.43), + "_sum": float64(1.74), "_group": []map[string]any{ { - "Age": int64(32), - "_sum": float64(1.61), - }, - { - "Age": int64(25), - "_sum": float64(1.82), + "Age": int64(55), + "_sum": float64(1.74), }, }, }, @@ -479,16 +465,30 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfSumOfFloa }, }, { - "Name": "Carlo", - "_sum": float64(1.74), + "Name": "John", + "_sum": float64(5.65), "_group": []map[string]any{ { "Verified": true, - "_sum": float64(1.74), + "_sum": float64(3.43), "_group": []map[string]any{ { - "Age": int64(55), - "_sum": float64(1.74), + "Age": int64(32), + "_sum": float64(1.61), + }, + { + "Age": int64(25), + "_sum": float64(1.82), + }, + }, + }, + { + "Verified": false, + "_sum": float64(2.22), + "_group": []map[string]any{ + { + "Age": int64(34), + "_sum": float64(2.22), }, }, }, diff --git a/tests/integration/query/simple/with_group_test.go 
b/tests/integration/query/simple/with_group_test.go index e740787d3c..a12ecd0668 100644 --- a/tests/integration/query/simple/with_group_test.go +++ b/tests/integration/query/simple/with_group_test.go @@ -85,13 +85,13 @@ func TestQuerySimpleWithGroupByNumber(t *testing.T) { }, Results: []map[string]any{ { - "Age": int64(32), + "Age": int64(55), }, { - "Age": int64(19), + "Age": int64(32), }, { - "Age": int64(55), + "Age": int64(19), }, }, } @@ -129,13 +129,13 @@ func TestQuerySimpleWithGroupByDateTime(t *testing.T) { }, Results: []map[string]any{ { - "CreatedAt": testUtils.MustParseTime("2013-07-23T03:46:56-05:00"), + "CreatedAt": testUtils.MustParseTime("2011-07-23T03:46:56-05:00"), }, { - "CreatedAt": testUtils.MustParseTime("2012-07-23T03:46:56-05:00"), + "CreatedAt": testUtils.MustParseTime("2013-07-23T03:46:56-05:00"), }, { - "CreatedAt": testUtils.MustParseTime("2011-07-23T03:46:56-05:00"), + "CreatedAt": testUtils.MustParseTime("2012-07-23T03:46:56-05:00"), }, }, } @@ -175,6 +175,14 @@ func TestQuerySimpleWithGroupByNumberWithGroupString(t *testing.T) { }, }, Results: []map[string]any{ + { + "Age": int64(55), + "_group": []map[string]any{ + { + "Name": "Carlo", + }, + }, + }, { "Age": int64(32), "_group": []map[string]any{ @@ -194,14 +202,6 @@ func TestQuerySimpleWithGroupByNumberWithGroupString(t *testing.T) { }, }, }, - { - "Age": int64(55), - "_group": []map[string]any{ - { - "Name": "Carlo", - }, - }, - }, }, } @@ -241,10 +241,10 @@ func TestQuerySimpleWithGroupByWithoutGroupedFieldSelectedWithInnerGroup(t *test }, Results: []map[string]any{ { - "Name": "Alice", + "Name": "Carlo", "_group": []map[string]any{ { - "Age": int64(19), + "Age": int64(55), }, }, }, @@ -252,18 +252,18 @@ func TestQuerySimpleWithGroupByWithoutGroupedFieldSelectedWithInnerGroup(t *test "Name": "John", "_group": []map[string]any{ { - "Age": int64(32), + "Age": int64(25), }, { - "Age": int64(25), + "Age": int64(32), }, }, }, { - "Name": "Carlo", + "Name": "Alice", "_group": 
[]map[string]any{ { - "Age": int64(55), + "Age": int64(19), }, }, }, @@ -306,10 +306,10 @@ func TestQuerySimpleWithGroupByString(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Alice", + "Name": "Carlo", "_group": []map[string]any{ { - "Age": int64(19), + "Age": int64(55), }, }, }, @@ -317,18 +317,18 @@ func TestQuerySimpleWithGroupByString(t *testing.T) { "Name": "John", "_group": []map[string]any{ { - "Age": int64(32), + "Age": int64(25), }, { - "Age": int64(25), + "Age": int64(32), }, }, }, { - "Name": "Carlo", + "Name": "Alice", "_group": []map[string]any{ { - "Age": int64(55), + "Age": int64(19), }, }, }, @@ -407,26 +407,26 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBoolean(t *testing.T) { }, }, { - "Name": "Alice", + "Name": "Carlo", "_group": []map[string]any{ { - "Verified": false, + "Verified": true, "_group": []map[string]any{ { - "Age": int64(19), + "Age": int64(55), }, }, }, }, }, { - "Name": "Carlo", + "Name": "Alice", "_group": []map[string]any{ { - "Verified": true, + "Verified": false, "_group": []map[string]any{ { - "Age": int64(55), + "Age": int64(19), }, }, }, @@ -502,20 +502,20 @@ func TestQuerySimpleWithGroupByStringThenBoolean(t *testing.T) { }, }, { - "Name": "Alice", - "Verified": false, + "Name": "Carlo", + "Verified": true, "_group": []map[string]any{ { - "Age": int64(19), + "Age": int64(55), }, }, }, { - "Name": "Carlo", - "Verified": true, + "Name": "Alice", + "Verified": false, "_group": []map[string]any{ { - "Age": int64(55), + "Age": int64(19), }, }, }, @@ -589,20 +589,20 @@ func TestQuerySimpleWithGroupByBooleanThenNumber(t *testing.T) { }, }, { - "Name": "Alice", - "Verified": false, + "Name": "Carlo", + "Verified": true, "_group": []map[string]any{ { - "Age": int64(19), + "Age": int64(55), }, }, }, { - "Name": "Carlo", - "Verified": true, + "Name": "Alice", + "Verified": false, "_group": []map[string]any{ { - "Age": int64(55), + "Age": int64(19), }, }, }, @@ -677,10 +677,10 @@ func 
TestQuerySimpleWithGroupByNumberOnUndefinedWithChildren(t *testing.T) { "Age": nil, "_group": []map[string]any{ { - "Name": "Bob", + "Name": "Alice", }, { - "Name": "Alice", + "Name": "Bob", }, }, }, diff --git a/tests/integration/query/simple/with_limit_offset_test.go b/tests/integration/query/simple/with_limit_offset_test.go index 13683414c7..21e02f049c 100644 --- a/tests/integration/query/simple/with_limit_offset_test.go +++ b/tests/integration/query/simple/with_limit_offset_test.go @@ -108,12 +108,12 @@ func TestQuerySimpleWithLimit(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Bob", - "Age": int64(32), + "Name": "Carlo", + "Age": int64(55), }, { - "Name": "Alice", - "Age": int64(19), + "Name": "Bob", + "Age": int64(32), }, }, }, @@ -187,8 +187,8 @@ func TestQuerySimpleWithLimitAndOffset(t *testing.T) { "Age": int64(21), }, { - "Name": "Carlo", - "Age": int64(55), + "Name": "Alice", + "Age": int64(19), }, }, }, @@ -262,16 +262,16 @@ func TestQuerySimpleWithOffset(t *testing.T) { }, Results: []map[string]any{ { - "Name": "Alice", - "Age": int64(19), + "Name": "Melynda", + "Age": int64(30), }, { "Name": "John", "Age": int64(21), }, { - "Name": "Carlo", - "Age": int64(55), + "Name": "Alice", + "Age": int64(19), }, }, }, diff --git a/tests/integration/query/simple/with_order_test.go b/tests/integration/query/simple/with_order_test.go index 1a1f966e60..1bd56574f9 100644 --- a/tests/integration/query/simple/with_order_test.go +++ b/tests/integration/query/simple/with_order_test.go @@ -42,6 +42,10 @@ func TestQuerySimpleWithEmptyOrder(t *testing.T) { }, }, Results: []map[string]any{ + { + "Name": "Carlo", + "Age": int64(55), + }, { "Name": "Bob", "Age": int64(32), @@ -50,10 +54,6 @@ func TestQuerySimpleWithEmptyOrder(t *testing.T) { "Name": "John", "Age": int64(21), }, - { - "Name": "Carlo", - "Age": int64(55), - }, }, } diff --git a/tests/integration/query/simple/with_version_test.go b/tests/integration/query/simple/with_version_test.go index 
ea7ac76a2b..67edabea9d 100644 --- a/tests/integration/query/simple/with_version_test.go +++ b/tests/integration/query/simple/with_version_test.go @@ -46,15 +46,15 @@ func TestQuerySimpleWithEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", + "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", "links": []map[string]any{ { - "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", - "name": "Name", + "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "name": "Age", }, { - "cid": "bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", - "name": "Age", + "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "name": "Name", }, }, }, @@ -101,7 +101,7 @@ func TestQuerySimpleWithEmbeddedLatestCommitWithSchemaVersionID(t *testing.T) { } func TestQuerySimpleWithEmbeddedLatestCommitWithDocID(t *testing.T) { - const docID = "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + const docID = "bae-d4303725-7db9-53d2-b324-f3ee44020e52" test := testUtils.RequestTestCase{ Description: "Embedded commits query within object query with document ID", @@ -171,23 +171,23 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", + "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", "L1": []map[string]any{ { - "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", - "name": "Name", + "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "name": "Age", }, { - "cid": "bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", - "name": "Age", + "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "name": "Name", }, }, "L2": []map[string]any{ { - "name": "Name", + "name": "Age", }, { - "name": "Age", + 
"name": "Name", }, }, }, @@ -200,7 +200,7 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { } func TestQuery_WithAllCommitFields_NoError(t *testing.T) { - const docID = "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + const docID = "bae-d4303725-7db9-53d2-b324-f3ee44020e52" test := testUtils.TestCase{ Description: "Embedded commits query within object query with document ID", @@ -242,21 +242,21 @@ func TestQuery_WithAllCommitFields_NoError(t *testing.T) { "_docID": docID, "_version": []map[string]any{ { - "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", + "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", "collectionID": int64(1), "delta": nil, - "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", + "docID": "bae-d4303725-7db9-53d2-b324-f3ee44020e52", "fieldId": "C", "fieldName": nil, "height": int64(1), "links": []map[string]any{ { - "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", - "name": "Name", + "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "name": "Age", }, { - "cid": "bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", - "name": "Age", + "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "name": "Name", }, }, "schemaVersionId": "bafkreigqmcqzkbg3elpe24vfza4rjle2r6cxu7ihzvg56aov57crhaebry", @@ -272,7 +272,7 @@ func TestQuery_WithAllCommitFields_NoError(t *testing.T) { } func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { - const docID = "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" + const docID = "bae-d4303725-7db9-53d2-b324-f3ee44020e52" test := testUtils.TestCase{ Description: "Embedded commits query within object query with document ID", @@ -321,41 +321,41 @@ func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { "_docID": docID, "_version": []map[string]any{ { - "cid": "bafyreibbn2vjovh65xe5v2bqxqxkb6sek5xkbnouhryya6enesbhzfplvm", + "cid": 
"bafyreiewiyarxxkzmgss6g35i4h2uiyzoe6kbnmtwaxv4cab6xefnjlzka", "collectionID": int64(1), "delta": nil, - "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", + "docID": docID, "fieldId": "C", "fieldName": nil, "height": int64(2), "links": []map[string]any{ { - "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", + "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", "name": "_head", }, { - "cid": "bafyreifycx5aqjhdlmzaf3bqb6ieomfxrzercas3hxnqcwz2jb25mkrzxi", + "cid": "bafyreiejccdc662jvyhia2ee57dvuuzsrsrhbx3hoenojtasofxtix3k7y", "name": "Age", }, }, "schemaVersionId": "bafkreigqmcqzkbg3elpe24vfza4rjle2r6cxu7ihzvg56aov57crhaebry", }, { - "cid": "bafyreidmbagmnhwb3qr5qctclsylkzgrwpbmiuxirtfbdf3fuzxbibljfi", + "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", "collectionID": int64(1), "delta": nil, - "docID": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", + "docID": docID, "fieldId": "C", "fieldName": nil, "height": int64(1), "links": []map[string]any{ { - "cid": "bafyreibhdfmodhqycxtw33ffdceh2wlxqlwcwbyowvs2lrlvimph7ekg2u", - "name": "Name", + "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "name": "Age", }, { - "cid": "bafyreigrxupxvzvjfx6wblmpc6fgekapr7nxlmokvi4gmz6ojmzmbrnapa", - "name": "Age", + "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "name": "Name", }, }, "schemaVersionId": "bafkreigqmcqzkbg3elpe24vfza4rjle2r6cxu7ihzvg56aov57crhaebry", diff --git a/tests/integration/schema/migrations/query/with_doc_id_test.go b/tests/integration/schema/migrations/query/with_doc_id_test.go index ee175515dc..a006441c4f 100644 --- a/tests/integration/schema/migrations/query/with_doc_id_test.go +++ b/tests/integration/schema/migrations/query/with_doc_id_test.go @@ -33,7 +33,7 @@ func TestSchemaMigrationQueryByDocID(t *testing.T) { `, }, testUtils.CreateDoc{ - // bae-d7546ac1-c133-5853-b866-9b9f926fe7e5 + // bae-48c8dacd-58ab-5fd5-8bbf-91bd823f4d5e Doc: `{ "name": "Shahzad" }`, 
@@ -69,7 +69,7 @@ func TestSchemaMigrationQueryByDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - Users (docID: "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5") { + Users (docID: "bae-48c8dacd-58ab-5fd5-8bbf-91bd823f4d5e") { name verified } @@ -114,37 +114,37 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { // and we want to make sure that lenses are being correctly returned // to the pool for reuse after. testUtils.CreateDoc{ - // bae-d7546ac1-c133-5853-b866-9b9f926fe7e5 + // bae-48c8dacd-58ab-5fd5-8bbf-91bd823f4d5e Doc: `{ "name": "Shahzad" }`, }, testUtils.CreateDoc{ - // bae-92393ad0-07b6-5753-8dbb-19c9c41374ed + // bae-3a7df128-bfa9-559a-a9c5-96f2bf6d1038 Doc: `{ "name": "Fred" }`, }, testUtils.CreateDoc{ - // bae-403d7337-f73e-5c81-8719-e853938c8985 + // bae-5622129c-b893-5768-a3f4-8f745db4cc04 Doc: `{ "name": "Chris" }`, }, testUtils.CreateDoc{ - // bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad + // bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc Doc: `{ "name": "John" }`, }, testUtils.CreateDoc{ - // bae-3f1174ba-d9bc-5a6a-b0bc-8f19581f199d + // bae-38a4ebb2-583a-5b6e-8e90-a6fe9e13be06 Doc: `{ "name": "Islam" }`, }, testUtils.CreateDoc{ - // bae-0698bda7-2c69-5028-a26a-0a1c491b793b + // bae-4d2c0f6e-af73-54d9-ac8a-a419077ea1e5 Doc: `{ "name": "Dave" }`, @@ -175,7 +175,7 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - Users (docID: "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5") { + Users (docID: "bae-48c8dacd-58ab-5fd5-8bbf-91bd823f4d5e") { name verified } @@ -189,7 +189,7 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - Users (docID: "bae-92393ad0-07b6-5753-8dbb-19c9c41374ed") { + Users (docID: "bae-3a7df128-bfa9-559a-a9c5-96f2bf6d1038") { name verified } @@ -203,7 +203,7 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - Users (docID: 
"bae-403d7337-f73e-5c81-8719-e853938c8985") { + Users (docID: "bae-5622129c-b893-5768-a3f4-8f745db4cc04") { name verified } @@ -217,7 +217,7 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - Users (docID: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad") { + Users (docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc") { name verified } @@ -231,7 +231,7 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - Users (docID: "bae-3f1174ba-d9bc-5a6a-b0bc-8f19581f199d") { + Users (docID: "bae-38a4ebb2-583a-5b6e-8e90-a6fe9e13be06") { name verified } @@ -245,7 +245,7 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { }, testUtils.Request{ Request: `query { - Users (docID: "bae-0698bda7-2c69-5028-a26a-0a1c491b793b") { + Users (docID: "bae-4d2c0f6e-af73-54d9-ac8a-a419077ea1e5") { name verified } diff --git a/tests/integration/schema/updates/add/field/create_test.go b/tests/integration/schema/updates/add/field/create_test.go index af771d025c..32f10b1a60 100644 --- a/tests/integration/schema/updates/add/field/create_test.go +++ b/tests/integration/schema/updates/add/field/create_test.go @@ -50,7 +50,7 @@ func TestSchemaUpdatesAddFieldWithCreate(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", + "_docID": "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc", "name": "John", "email": nil, }, @@ -98,21 +98,18 @@ func TestSchemaUpdatesAddFieldWithCreateAfterSchemaUpdate(t *testing.T) { testUtils.Request{ Request: `query { Users { - _docID name email } }`, Results: []map[string]any{ { - "_docID": "bae-1ff978e7-b6ab-5ca7-8344-7fdcff65f94e", - "name": "Shahzad", - "email": "sqlizded@yahoo.ca", + "name": "Shahzad", + "email": "sqlizded@yahoo.ca", }, { - "_docID": "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", - "name": "John", - "email": nil, + "name": "John", + "email": nil, }, }, }, diff --git 
a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go index 56bfbd2131..9b18500446 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go @@ -11,7 +11,6 @@ package kind import ( - "fmt" "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" @@ -122,8 +121,6 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidKind(t *testing.T) } func TestSchemaUpdatesAddFieldKindForeignObject_Succeeds(t *testing.T) { - key1 := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" - test := testUtils.TestCase{ Description: "Test schema update, add field with kind foreign object, valid, functional", Actions: []any{ @@ -146,36 +143,15 @@ func TestSchemaUpdatesAddFieldKindForeignObject_Succeeds(t *testing.T) { ] `, }, - testUtils.Request{ - Request: `mutation { - create_Users(input: {name: "John"}) { - _docID - } - }`, - Results: []map[string]any{ - { - "_docID": key1, - }, + testUtils.CreateDoc{ + DocMap: map[string]any{ + "name": "John", }, }, - testUtils.Request{ - Request: fmt.Sprintf(`mutation { - create_Users(input: {name: "Keenan", foo: "%s"}) { - name - foo { - name - } - } - }`, - key1, - ), - Results: []map[string]any{ - { - "name": "Keenan", - "foo": map[string]any{ - "name": "John", - }, - }, + testUtils.CreateDoc{ + DocMap: map[string]any{ + "name": "Keenan", + "foo": testUtils.NewDocIndex(0, 0), }, }, testUtils.Request{ @@ -188,16 +164,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_Succeeds(t *testing.T) { } }`, Results: []map[string]any{ + { + "name": "John", + "foo": nil, + }, { "name": "Keenan", "foo": map[string]any{ "name": "John", }, }, - { - "name": "John", - "foo": nil, - }, }, }, }, diff --git a/tests/integration/subscription/subscription_test.go b/tests/integration/subscription/subscription_test.go index 02ac058c90..2de42f2793 100644 --- 
a/tests/integration/subscription/subscription_test.go +++ b/tests/integration/subscription/subscription_test.go @@ -30,12 +30,12 @@ func TestSubscriptionWithCreateMutations(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", + "_docID": "bae-b3ce089b-f543-5984-be9f-ad7d08969f4e", "age": int64(27), "name": "John", }, { - "_docID": "bae-18def051-7f0f-5dc9-8a69-2a5e423f6b55", + "_docID": "bae-bc20b854-10b3-5408-b28c-f273ddda9434", "age": int64(31), "name": "Addo", }, @@ -78,16 +78,14 @@ func TestSubscriptionWithFilterAndOneCreateMutation(t *testing.T) { testUtils.SubscriptionRequest{ Request: `subscription { User(filter: {age: {_lt: 30}}) { - _docID name age } }`, Results: []map[string]any{ { - "_docID": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": int64(27), - "name": "John", + "age": int64(27), + "name": "John", }, }, }, @@ -148,16 +146,14 @@ func TestSubscriptionWithFilterAndCreateMutations(t *testing.T) { testUtils.SubscriptionRequest{ Request: `subscription { User(filter: {age: {_lt: 30}}) { - _docID name age } }`, Results: []map[string]any{ { - "_docID": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": int64(27), - "name": "John", + "age": int64(27), + "name": "John", }, }, }, @@ -216,7 +212,6 @@ func TestSubscriptionWithUpdateMutations(t *testing.T) { testUtils.SubscriptionRequest{ Request: `subscription { User { - _docID name age points @@ -224,7 +219,6 @@ func TestSubscriptionWithUpdateMutations(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", "age": int64(27), "name": "John", "points": float64(45), @@ -274,7 +268,6 @@ func TestSubscriptionWithUpdateAllMutations(t *testing.T) { testUtils.SubscriptionRequest{ Request: `subscription { User { - _docID name age points @@ -282,15 +275,13 @@ func TestSubscriptionWithUpdateAllMutations(t *testing.T) { }`, Results: []map[string]any{ { - "_docID": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": 
int64(27), - "name": "John", + "age": int64(31), + "name": "Addo", "points": float64(55), }, { - "_docID": "bae-76b0f3f5-964c-57c3-b44b-4a91bea70d40", - "age": int64(31), - "name": "Addo", + "age": int64(27), + "name": "John", "points": float64(55), }, }, @@ -303,10 +294,10 @@ func TestSubscriptionWithUpdateAllMutations(t *testing.T) { }`, Results: []map[string]any{ { - "name": "John", + "name": "Addo", }, { - "name": "Addo", + "name": "John", }, }, }, diff --git a/tests/integration/view/one_to_many/simple_test.go b/tests/integration/view/one_to_many/simple_test.go index 30f76987a2..9c846b6ba0 100644 --- a/tests/integration/view/one_to_many/simple_test.go +++ b/tests/integration/view/one_to_many/simple_test.go @@ -51,7 +51,6 @@ func TestView_OneToMany(t *testing.T) { } `, }, - // bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d testUtils.CreateDoc{ CollectionID: 0, Doc: `{ @@ -60,10 +59,10 @@ func TestView_OneToMany(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "To Kill a Mockingbird", - "author_id": "bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d" - }`, + DocMap: map[string]any{ + "name": "To Kill a Mockingbird", + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -418,7 +417,6 @@ func TestView_OneToManyWithDoubleSidedRelation_Errors(t *testing.T) { } `, }, - // bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d testUtils.CreateDoc{ CollectionID: 0, Doc: `{ @@ -427,10 +425,10 @@ func TestView_OneToManyWithDoubleSidedRelation_Errors(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "To Kill a Mockingbird", - "author_id": "bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d" - }`, + DocMap: map[string]any{ + "name": "To Kill a Mockingbird", + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { diff --git a/tests/integration/view/one_to_many/with_alias_test.go b/tests/integration/view/one_to_many/with_alias_test.go index be2d4a8f30..bfd6e443d9 100644 --- 
a/tests/integration/view/one_to_many/with_alias_test.go +++ b/tests/integration/view/one_to_many/with_alias_test.go @@ -51,7 +51,6 @@ func TestView_OneToManyWithAliasOnOuter(t *testing.T) { } `, }, - // bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d testUtils.CreateDoc{ CollectionID: 0, Doc: `{ @@ -60,10 +59,10 @@ func TestView_OneToManyWithAliasOnOuter(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "To Kill a Mockingbird", - "author_id": "bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d" - }`, + DocMap: map[string]any{ + "name": "To Kill a Mockingbird", + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: `query { @@ -126,7 +125,6 @@ func TestView_OneToManyWithAliasOnInner(t *testing.T) { } `, }, - // bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d testUtils.CreateDoc{ CollectionID: 0, Doc: `{ @@ -135,10 +133,10 @@ func TestView_OneToManyWithAliasOnInner(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "To Kill a Mockingbird", - "author_id": "bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d" - }`, + DocMap: map[string]any{ + "name": "To Kill a Mockingbird", + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: ` diff --git a/tests/integration/view/one_to_many/with_count_test.go b/tests/integration/view/one_to_many/with_count_test.go index 256b2057bb..2fa06d9aec 100644 --- a/tests/integration/view/one_to_many/with_count_test.go +++ b/tests/integration/view/one_to_many/with_count_test.go @@ -114,7 +114,6 @@ func TestView_OneToManyWithAliasedCount(t *testing.T) { } `, }, - // bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d testUtils.CreateDoc{ CollectionID: 0, Doc: `{ @@ -123,17 +122,17 @@ func TestView_OneToManyWithAliasedCount(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "To Kill a Mockingbird", - "author_id": "bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d" - }`, + DocMap: map[string]any{ + "name": "To Kill a Mockingbird", + "author_id": testUtils.NewDocIndex(0, 
0), + }, }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Go Set a Watchman", - "author_id": "bae-ef9cd756-08e1-5f23-abeb-7b3e6351a68d" - }`, + DocMap: map[string]any{ + "name": "Go Set a Watchman", + "author_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: ` diff --git a/tests/integration/view/one_to_many/with_transform_test.go b/tests/integration/view/one_to_many/with_transform_test.go index 05b41516f4..13ef4f19c6 100644 --- a/tests/integration/view/one_to_many/with_transform_test.go +++ b/tests/integration/view/one_to_many/with_transform_test.go @@ -76,10 +76,10 @@ func TestView_OneToManyWithTransformOnOuter(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Shahnameh", - "author": "bae-db3c6923-c6a4-5386-8301-b20a5454bf1d" - }`, + DocMap: map[string]any{ + "name": "Shahnameh", + "author": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: ` diff --git a/tests/integration/view/one_to_one/identical_schema_test.go b/tests/integration/view/one_to_one/identical_schema_test.go index 90248ede17..ec83cd81cc 100644 --- a/tests/integration/view/one_to_one/identical_schema_test.go +++ b/tests/integration/view/one_to_one/identical_schema_test.go @@ -53,7 +53,6 @@ func TestView_OneToOneSameSchema(t *testing.T) { } `, }, - // bae-f3db7a4d-3db1-5d57-9996-32c3fdff99d3 testUtils.CreateDoc{ CollectionID: 0, Doc: `{ @@ -62,10 +61,10 @@ func TestView_OneToOneSameSchema(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Right hand 1", - "holding_id": "bae-f3db7a4d-3db1-5d57-9996-32c3fdff99d3" - }`, + DocMap: map[string]any{ + "name": "Right hand 1", + "holding_id": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: ` diff --git a/tests/integration/view/one_to_one/with_transform_test.go b/tests/integration/view/one_to_one/with_transform_test.go index e6da410ee1..d4270b454d 100644 --- a/tests/integration/view/one_to_one/with_transform_test.go +++ 
b/tests/integration/view/one_to_one/with_transform_test.go @@ -76,10 +76,10 @@ func TestView_OneToOneWithTransformOnOuter(t *testing.T) { }, testUtils.CreateDoc{ CollectionID: 1, - Doc: `{ - "name": "Shahnameh", - "author": "bae-db3c6923-c6a4-5386-8301-b20a5454bf1d" - }`, + DocMap: map[string]any{ + "name": "Shahnameh", + "author": testUtils.NewDocIndex(0, 0), + }, }, testUtils.Request{ Request: ` diff --git a/tests/integration/view/simple/simple_test.go b/tests/integration/view/simple/simple_test.go index 5dd74da8ed..df9ef60379 100644 --- a/tests/integration/view/simple/simple_test.go +++ b/tests/integration/view/simple/simple_test.go @@ -103,10 +103,10 @@ func TestView_SimpleMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "name": "Fred", + "name": "John", }, { - "name": "John", + "name": "Fred", }, }, }, diff --git a/tests/integration/view/simple/with_transform_test.go b/tests/integration/view/simple/with_transform_test.go index fc148357e9..b6d903a286 100644 --- a/tests/integration/view/simple/with_transform_test.go +++ b/tests/integration/view/simple/with_transform_test.go @@ -78,10 +78,10 @@ func TestView_SimpleWithTransform(t *testing.T) { `, Results: []map[string]any{ { - "fullName": "Fred", + "fullName": "John", }, { - "fullName": "John", + "fullName": "Fred", }, }, }, @@ -159,11 +159,11 @@ func TestView_SimpleWithMultipleTransforms(t *testing.T) { `, Results: []map[string]any{ { - "fullName": "Fred", + "fullName": "John", "age": 23, }, { - "fullName": "John", + "fullName": "Fred", "age": 23, }, }, @@ -309,10 +309,10 @@ func TestView_SimpleWithTransformReturningFewerDocsThanInput(t *testing.T) { `, Results: []map[string]any{ { - "name": "Shahzad", + "name": "John", }, { - "name": "John", + "name": "Shahzad", }, }, }, From 8d0c75611161cc3c507966ccb3a1dcf1d95712ac Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Mon, 10 Jun 2024 14:09:02 -0400 Subject: [PATCH 41/78] refactor: Change counters to support encryption (#2698) ## Relevant issue(s) 
Resolves #2696 ## Description This PR changes the counter CRDTs to make them support the document encryption feature. They previously stored their values as concrete types (int64 and float64) instead of bytes. Storing them as bytes allow them to be stored plainly or encrypted. --- .../i2696-support-encryption-for-counters.md | 2 + internal/core/block/block.go | 8 +- internal/core/crdt/counter.go | 129 +++++++++++------- internal/core/crdt/errors.go | 15 +- internal/core/crdt/ipld_union.go | 36 ++--- internal/merkle/crdt/counter.go | 21 +-- internal/merkle/crdt/merklecrdt.go | 26 ++-- .../query/simple/with_cid_doc_id_test.go | 8 +- 8 files changed, 131 insertions(+), 114 deletions(-) create mode 100644 docs/data_format_changes/i2696-support-encryption-for-counters.md diff --git a/docs/data_format_changes/i2696-support-encryption-for-counters.md b/docs/data_format_changes/i2696-support-encryption-for-counters.md new file mode 100644 index 0000000000..dd53e57898 --- /dev/null +++ b/docs/data_format_changes/i2696-support-encryption-for-counters.md @@ -0,0 +1,2 @@ +# Support encryption for counters +We changed the data format of counters from int64 and float64 to bytes to support encryption. This changes the generated CIDs for counters. \ No newline at end of file diff --git a/internal/core/block/block.go b/internal/core/block/block.go index 6be17908be..c9a3f629c2 100644 --- a/internal/core/block/block.go +++ b/internal/core/block/block.go @@ -40,7 +40,7 @@ func init() { &crdt.CRDT{}, &crdt.LWWRegDelta{}, &crdt.CompositeDAGDelta{}, - &crdt.CounterDelta[int64]{}, // Only need to call one of the CounterDelta types. 
+ &crdt.CounterDelta{}, ) } @@ -149,10 +149,8 @@ func New(delta core.Delta, links []DAGLink, heads ...cid.Cid) *Block { crdtDelta = crdt.CRDT{LWWRegDelta: delta} case *crdt.CompositeDAGDelta: crdtDelta = crdt.CRDT{CompositeDAGDelta: delta} - case *crdt.CounterDelta[int64]: - crdtDelta = crdt.CRDT{CounterDeltaInt: delta} - case *crdt.CounterDelta[float64]: - crdtDelta = crdt.CRDT{CounterDeltaFloat: delta} + case *crdt.CounterDelta: + crdtDelta = crdt.CRDT{CounterDelta: delta} } return &Block{ diff --git a/internal/core/crdt/counter.go b/internal/core/crdt/counter.go index c87c7d6da6..4aa9a40793 100644 --- a/internal/core/crdt/counter.go +++ b/internal/core/crdt/counter.go @@ -33,7 +33,7 @@ type Incrementable interface { } // CounterDelta is a single delta operation for a Counter -type CounterDelta[T Incrementable] struct { +type CounterDelta struct { DocID []byte FieldName string Priority uint64 @@ -44,69 +44,60 @@ type CounterDelta[T Incrementable] struct { // // It can be used to identify the collection datastructure state at the time of commit. SchemaVersionID string - Data T + Data []byte } -var _ core.Delta = (*CounterDelta[float64])(nil) -var _ core.Delta = (*CounterDelta[int64])(nil) +var _ core.Delta = (*CounterDelta)(nil) // IPLDSchemaBytes returns the IPLD schema representation for the type. // -// This needs to match the [CounterDelta[T]] struct or [coreblock.mustSetSchema] will panic on init. -func (delta *CounterDelta[T]) IPLDSchemaBytes() []byte { +// This needs to match the [CounterDelta] struct or [coreblock.mustSetSchema] will panic on init. 
+func (delta *CounterDelta) IPLDSchemaBytes() []byte { return []byte(` - type CounterDeltaFloat struct { + type CounterDelta struct { docID Bytes fieldName String priority Int nonce Int schemaVersionID String - data Float - } - - type CounterDeltaInt struct { - docID Bytes - fieldName String - priority Int - nonce Int - schemaVersionID String - data Int + data Bytes }`) } // GetPriority gets the current priority for this delta. -func (delta *CounterDelta[T]) GetPriority() uint64 { +func (delta *CounterDelta) GetPriority() uint64 { return delta.Priority } // SetPriority will set the priority for this delta. -func (delta *CounterDelta[T]) SetPriority(prio uint64) { +func (delta *CounterDelta) SetPriority(prio uint64) { delta.Priority = prio } // Counter, is a simple CRDT type that allows increment/decrement // of an Int and Float data types that ensures convergence. -type Counter[T Incrementable] struct { +type Counter struct { baseCRDT AllowDecrement bool + Kind client.ScalarKind } -var _ core.ReplicatedData = (*Counter[float64])(nil) -var _ core.ReplicatedData = (*Counter[int64])(nil) +var _ core.ReplicatedData = (*Counter)(nil) // NewCounter returns a new instance of the Counter with the given ID. 
-func NewCounter[T Incrementable]( +func NewCounter( store datastore.DSReaderWriter, schemaVersionKey core.CollectionSchemaVersionKey, key core.DataStoreKey, fieldName string, allowDecrement bool, -) Counter[T] { - return Counter[T]{newBaseCRDT(store, key, schemaVersionKey, fieldName), allowDecrement} + kind client.ScalarKind, +) Counter { + return Counter{newBaseCRDT(store, key, schemaVersionKey, fieldName), allowDecrement, kind} } // Value gets the current counter value -func (c Counter[T]) Value(ctx context.Context) ([]byte, error) { +func (c Counter) Value(ctx context.Context) ([]byte, error) { valueK := c.key.WithValueFlag() buf, err := c.store.Get(ctx, valueK.ToDS()) if err != nil { @@ -120,7 +111,7 @@ func (c Counter[T]) Value(ctx context.Context) ([]byte, error) { // WARNING: Incrementing an integer and causing it to overflow the int64 max value // will cause the value to roll over to the int64 min value. Incremeting a float and // causing it to overflow the float64 max value will act like a no-op. -func (c Counter[T]) Increment(ctx context.Context, value T) (*CounterDelta[T], error) { +func (c Counter) Increment(ctx context.Context, value []byte) (*CounterDelta, error) { // To ensure that the dag block is unique, we add a random number to the delta. // This is done only on update (if the doc doesn't already exist) to ensure that the // initial dag block of a document can be reproducible. @@ -137,7 +128,7 @@ func (c Counter[T]) Increment(ctx context.Context, value T) (*CounterDelta[T], e nonce = r.Int64() } - return &CounterDelta[T]{ + return &CounterDelta{ DocID: []byte(c.key.DocID), FieldName: c.fieldName, Data: value, @@ -148,8 +139,8 @@ func (c Counter[T]) Increment(ctx context.Context, value T) (*CounterDelta[T], e // Merge implements ReplicatedData interface. // It merges two CounterRegisty by adding the values together. 
-func (c Counter[T]) Merge(ctx context.Context, delta core.Delta) error { - d, ok := delta.(*CounterDelta[T]) +func (c Counter) Merge(ctx context.Context, delta core.Delta) error { + d, ok := delta.(*CounterDelta) if !ok { return ErrMismatchedMergeType } @@ -157,10 +148,11 @@ func (c Counter[T]) Merge(ctx context.Context, delta core.Delta) error { return c.incrementValue(ctx, d.Data, d.GetPriority()) } -func (c Counter[T]) incrementValue(ctx context.Context, value T, priority uint64) error { - if !c.AllowDecrement && value < 0 { - return NewErrNegativeValue(value) - } +func (c Counter) incrementValue( + ctx context.Context, + valueAsBytes []byte, + priority uint64, +) error { key := c.key.WithValueFlag() marker, err := c.store.Get(ctx, c.key.ToPrimaryDataStoreKey().ToDS()) if err != nil && !errors.Is(err, ds.ErrNotFound) { @@ -170,27 +162,69 @@ func (c Counter[T]) incrementValue(ctx context.Context, value T, priority uint64 key = key.WithDeletedFlag() } - curValue, err := c.getCurrentValue(ctx, key) + var resultAsBytes []byte + + switch c.Kind { + case client.FieldKind_NILLABLE_INT: + resultAsBytes, err = validateAndIncrement[int64](ctx, c.store, key, valueAsBytes, c.AllowDecrement) + if err != nil { + return err + } + case client.FieldKind_NILLABLE_FLOAT: + resultAsBytes, err = validateAndIncrement[float64](ctx, c.store, key, valueAsBytes, c.AllowDecrement) + if err != nil { + return err + } + default: + return NewErrUnsupportedCounterType(c.Kind) + } + + err = c.store.Put(ctx, key.ToDS(), resultAsBytes) if err != nil { - return err + return NewErrFailedToStoreValue(err) } - newValue := curValue + value - b, err := cbor.Marshal(newValue) + return c.setPriority(ctx, c.key, priority) +} + +func (c Counter) CType() client.CType { + if c.AllowDecrement { + return client.PN_COUNTER + } + return client.P_COUNTER +} + +func validateAndIncrement[T Incrementable]( + ctx context.Context, + store datastore.DSReaderWriter, + key core.DataStoreKey, + valueAsBytes []byte, + 
allowDecrement bool, +) ([]byte, error) { + value, err := getNumericFromBytes[T](valueAsBytes) if err != nil { - return err + return nil, err } - err = c.store.Put(ctx, key.ToDS(), b) + if !allowDecrement && value < 0 { + return nil, NewErrNegativeValue(value) + } + + curValue, err := getCurrentValue[T](ctx, store, key) if err != nil { - return NewErrFailedToStoreValue(err) + return nil, err } - return c.setPriority(ctx, c.key, priority) + newValue := curValue + value + return cbor.Marshal(newValue) } -func (c Counter[T]) getCurrentValue(ctx context.Context, key core.DataStoreKey) (T, error) { - curValue, err := c.store.Get(ctx, key.ToDS()) +func getCurrentValue[T Incrementable]( + ctx context.Context, + store datastore.DSReaderWriter, + key core.DataStoreKey, +) (T, error) { + curValue, err := store.Get(ctx, key.ToDS()) if err != nil { if errors.Is(err, ds.ErrNotFound) { return 0, nil @@ -201,13 +235,6 @@ func (c Counter[T]) getCurrentValue(ctx context.Context, key core.DataStoreKey) return getNumericFromBytes[T](curValue) } -func (c Counter[T]) CType() client.CType { - if c.AllowDecrement { - return client.PN_COUNTER - } - return client.P_COUNTER -} - func getNumericFromBytes[T Incrementable](b []byte) (T, error) { var val T err := cbor.Unmarshal(b, &val) diff --git a/internal/core/crdt/errors.go b/internal/core/crdt/errors.go index 75af579850..43bc9c565c 100644 --- a/internal/core/crdt/errors.go +++ b/internal/core/crdt/errors.go @@ -11,13 +11,15 @@ package crdt import ( + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" ) const ( - errFailedToGetPriority string = "failed to get priority" - errFailedToStoreValue string = "failed to store value" - errNegativeValue string = "value cannot be negative" + errFailedToGetPriority string = "failed to get priority" + errFailedToStoreValue string = "failed to store value" + errNegativeValue string = "value cannot be negative" + errUnsupportedCounterType string = "unsupported counter 
type. Valid types are int64 and float64" ) // Errors returnable from this package. @@ -31,7 +33,8 @@ var ( ErrEncodingPriority = errors.New("error encoding priority") ErrDecodingPriority = errors.New("error decoding priority") // ErrMismatchedMergeType - Tying to merge two ReplicatedData of different types - ErrMismatchedMergeType = errors.New("given type to merge does not match source") + ErrMismatchedMergeType = errors.New("given type to merge does not match source") + ErrUnsupportedCounterType = errors.New(errUnsupportedCounterType) ) // NewErrFailedToGetPriority returns an error indicating that the priority could not be retrieved. @@ -47,3 +50,7 @@ func NewErrFailedToStoreValue(inner error) error { func NewErrNegativeValue[T Incrementable](value T) error { return errors.New(errNegativeValue, errors.NewKV("Value", value)) } + +func NewErrUnsupportedCounterType(valueType client.ScalarKind) error { + return errors.New(errUnsupportedCounterType, errors.NewKV("Type", valueType)) +} diff --git a/internal/core/crdt/ipld_union.go b/internal/core/crdt/ipld_union.go index 5d4cfc9f9e..361a41b150 100644 --- a/internal/core/crdt/ipld_union.go +++ b/internal/core/crdt/ipld_union.go @@ -16,8 +16,7 @@ import "github.com/sourcenetwork/defradb/internal/core" type CRDT struct { LWWRegDelta *LWWRegDelta CompositeDAGDelta *CompositeDAGDelta - CounterDeltaInt *CounterDelta[int64] - CounterDeltaFloat *CounterDelta[float64] + CounterDelta *CounterDelta } // IPLDSchemaBytes returns the IPLD schema representation for the CRDT. 
@@ -28,8 +27,7 @@ func (c CRDT) IPLDSchemaBytes() []byte { type CRDT union { | LWWRegDelta "lww" | CompositeDAGDelta "composite" - | CounterDeltaInt "counterInt" - | CounterDeltaFloat "counterFloat" + | CounterDelta "counter" } representation keyed`) } @@ -40,10 +38,8 @@ func (c CRDT) GetDelta() core.Delta { return c.LWWRegDelta case c.CompositeDAGDelta != nil: return c.CompositeDAGDelta - case c.CounterDeltaFloat != nil: - return c.CounterDeltaFloat - case c.CounterDeltaInt != nil: - return c.CounterDeltaInt + case c.CounterDelta != nil: + return c.CounterDelta } return nil } @@ -55,10 +51,8 @@ func (c CRDT) GetPriority() uint64 { return c.LWWRegDelta.GetPriority() case c.CompositeDAGDelta != nil: return c.CompositeDAGDelta.GetPriority() - case c.CounterDeltaFloat != nil: - return c.CounterDeltaFloat.GetPriority() - case c.CounterDeltaInt != nil: - return c.CounterDeltaInt.GetPriority() + case c.CounterDelta != nil: + return c.CounterDelta.GetPriority() } return 0 } @@ -70,10 +64,8 @@ func (c CRDT) GetFieldName() string { return c.LWWRegDelta.FieldName case c.CompositeDAGDelta != nil: return c.CompositeDAGDelta.FieldName - case c.CounterDeltaFloat != nil: - return c.CounterDeltaFloat.FieldName - case c.CounterDeltaInt != nil: - return c.CounterDeltaInt.FieldName + case c.CounterDelta != nil: + return c.CounterDelta.FieldName } return "" } @@ -85,10 +77,8 @@ func (c CRDT) GetDocID() []byte { return c.LWWRegDelta.DocID case c.CompositeDAGDelta != nil: return c.CompositeDAGDelta.DocID - case c.CounterDeltaFloat != nil: - return c.CounterDeltaFloat.DocID - case c.CounterDeltaInt != nil: - return c.CounterDeltaInt.DocID + case c.CounterDelta != nil: + return c.CounterDelta.DocID } return nil } @@ -100,10 +90,8 @@ func (c CRDT) GetSchemaVersionID() string { return c.LWWRegDelta.SchemaVersionID case c.CompositeDAGDelta != nil: return c.CompositeDAGDelta.SchemaVersionID - case c.CounterDeltaFloat != nil: - return c.CounterDeltaFloat.SchemaVersionID - case 
c.CounterDeltaInt != nil: - return c.CounterDeltaInt.SchemaVersionID + case c.CounterDelta != nil: + return c.CounterDelta.SchemaVersionID } return "" } diff --git a/internal/merkle/crdt/counter.go b/internal/merkle/crdt/counter.go index 4501de326c..c5d3a7e8dd 100644 --- a/internal/merkle/crdt/counter.go +++ b/internal/merkle/crdt/counter.go @@ -22,37 +22,42 @@ import ( ) // MerkleCounter is a MerkleCRDT implementation of the Counter using MerkleClocks. -type MerkleCounter[T crdt.Incrementable] struct { +type MerkleCounter struct { *baseMerkleCRDT - reg crdt.Counter[T] + reg crdt.Counter } // NewMerkleCounter creates a new instance (or loaded from DB) of a MerkleCRDT // backed by a Counter CRDT. -func NewMerkleCounter[T crdt.Incrementable]( +func NewMerkleCounter( store Stores, schemaVersionKey core.CollectionSchemaVersionKey, key core.DataStoreKey, fieldName string, allowDecrement bool, -) *MerkleCounter[T] { - register := crdt.NewCounter[T](store.Datastore(), schemaVersionKey, key, fieldName, allowDecrement) + kind client.ScalarKind, +) *MerkleCounter { + register := crdt.NewCounter(store.Datastore(), schemaVersionKey, key, fieldName, allowDecrement, kind) clk := clock.NewMerkleClock(store.Headstore(), store.DAGstore(), key.ToHeadStoreKey(), register) base := &baseMerkleCRDT{clock: clk, crdt: register} - return &MerkleCounter[T]{ + return &MerkleCounter{ baseMerkleCRDT: base, reg: register, } } // Save the value of the Counter to the DAG. 
-func (mc *MerkleCounter[T]) Save(ctx context.Context, data any) (cidlink.Link, []byte, error) { +func (mc *MerkleCounter) Save(ctx context.Context, data any) (cidlink.Link, []byte, error) { value, ok := data.(*client.FieldValue) if !ok { return cidlink.Link{}, nil, NewErrUnexpectedValueType(mc.reg.CType(), &client.FieldValue{}, data) } - delta, err := mc.reg.Increment(ctx, value.Value().(T)) + bytes, err := value.Bytes() + if err != nil { + return cidlink.Link{}, nil, err + } + delta, err := mc.reg.Increment(ctx, bytes) if err != nil { return cidlink.Link{}, nil, err } diff --git a/internal/merkle/crdt/merklecrdt.go b/internal/merkle/crdt/merklecrdt.go index ed8452195f..abc0ffeb51 100644 --- a/internal/merkle/crdt/merklecrdt.go +++ b/internal/merkle/crdt/merklecrdt.go @@ -88,24 +88,14 @@ func InstanceWithStore( fieldName, ), nil case client.PN_COUNTER, client.P_COUNTER: - switch kind { - case client.FieldKind_NILLABLE_INT: - return NewMerkleCounter[int64]( - store, - schemaVersionKey, - key, - fieldName, - cType == client.PN_COUNTER, - ), nil - case client.FieldKind_NILLABLE_FLOAT: - return NewMerkleCounter[float64]( - store, - schemaVersionKey, - key, - fieldName, - cType == client.PN_COUNTER, - ), nil - } + return NewMerkleCounter( + store, + schemaVersionKey, + key, + fieldName, + cType == client.PN_COUNTER, + kind.(client.ScalarKind), + ), nil case client.COMPOSITE: return NewMerkleCompositeDAG( store, diff --git a/tests/integration/query/simple/with_cid_doc_id_test.go b/tests/integration/query/simple/with_cid_doc_id_test.go index 97791ce993..dcf0d1a1d1 100644 --- a/tests/integration/query/simple/with_cid_doc_id_test.go +++ b/tests/integration/query/simple/with_cid_doc_id_test.go @@ -324,7 +324,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreicsx7flfz4b6iwfmwgrnrnd2klxrbg6yojuffh4ia3lrrqcph5q7a", + cid: "bafyreienkinjn7cvsonvhs4tslqvmmcnezuu4aif57jn75cyp6i3vdvkpm", 
docID: "bae-d8cb53d4-ac5a-5c55-8306-64df633d400d" ) { name @@ -376,7 +376,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreidwtowbnmdfshq3dptfdggzswtdftyh5374ohfcmqki4ad2wd4m64", + cid: "bafyreiceodj32fyhq3v7ryk6mmcjanwx3zr7ajl2k47w4setngmyx7nc3e", docID: "bae-d420ebcd-023a-5800-ae2e-8ea89442318e" ) { name @@ -423,7 +423,7 @@ func TestCidAndDocIDQuery_ContainsPCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreifngcu76fxe3dtjee556hwymfjgsm3sqhxned4cykit5lcsyy3ope", + cid: "bafyreieypgt2mq43g4ute2hkzombdqw5v6wctleyxyy6vdkzitrfje636i", docID: "bae-d8cb53d4-ac5a-5c55-8306-64df633d400d" ) { name @@ -470,7 +470,7 @@ func TestCidAndDocIDQuery_ContainsPCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreigih3wl4ycq5lktczydbecvcvlmdsy5jzarx2l6hcqdcrqkoranny", + cid: "bafyreigb3ujvnxie7kwl53w4chiq6cjcyuhranchseo5gmx5i6vfje67da", docID: "bae-d420ebcd-023a-5800-ae2e-8ea89442318e" ) { name From 05a0932a43486d6389b85185cd82121ad3cb0581 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Mon, 10 Jun 2024 16:55:20 -0400 Subject: [PATCH 42/78] refactor: Extract definition stuff from collection.go (#2706) ## Relevant issue(s) Resolves #2407 ## Description Extracts collection create and patch, schema patch, and validation from collection.go. It was getting really messy in collection.go and this should hopefully be a little better. No code has changed, only moved. 
--- internal/db/collection.go | 1117 -------------------------- internal/db/collection_define.go | 373 +++++++++ internal/db/definition_validation.go | 585 ++++++++++++++ internal/db/schema.go | 201 +++++ 4 files changed, 1159 insertions(+), 1117 deletions(-) create mode 100644 internal/db/collection_define.go create mode 100644 internal/db/definition_validation.go diff --git a/internal/db/collection.go b/internal/db/collection.go index 4b9c988288..19cb42cb86 100644 --- a/internal/db/collection.go +++ b/internal/db/collection.go @@ -13,18 +13,14 @@ package db import ( "bytes" "context" - "encoding/json" "fmt" - "reflect" "strconv" "strings" - jsonpatch "github.com/evanphx/json-patch/v5" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/acp" @@ -81,1119 +77,6 @@ func (c *collection) newFetcher() fetcher.Fetcher { return lens.NewFetcher(innerFetcher, c.db.LensRegistry()) } -// createCollection creates a collection and saves it to the database in its system store. -// Note: Collection.ID is an auto-incrementing value that is generated by the database. 
-func (db *db) createCollection( - ctx context.Context, - def client.CollectionDefinition, - newDefinitions []client.CollectionDefinition, -) (client.Collection, error) { - schema := def.Schema - desc := def.Description - txn := mustGetContextTxn(ctx) - - if desc.Name.HasValue() { - exists, err := description.HasCollectionByName(ctx, txn, desc.Name.Value()) - if err != nil { - return nil, err - } - if exists { - return nil, ErrCollectionAlreadyExists - } - } - - existingDefinitions, err := db.getAllActiveDefinitions(ctx) - if err != nil { - return nil, err - } - - schemaByName := map[string]client.SchemaDescription{} - for _, existingDefinition := range existingDefinitions { - schemaByName[existingDefinition.Schema.Name] = existingDefinition.Schema - } - for _, newDefinition := range newDefinitions { - schemaByName[newDefinition.Schema.Name] = newDefinition.Schema - } - - _, err = validateUpdateSchemaFields(schemaByName, client.SchemaDescription{}, schema) - if err != nil { - return nil, err - } - - definitionsByName := map[string]client.CollectionDefinition{} - for _, existingDefinition := range existingDefinitions { - definitionsByName[existingDefinition.GetName()] = existingDefinition - } - for _, newDefinition := range newDefinitions { - definitionsByName[newDefinition.GetName()] = newDefinition - } - err = db.validateNewCollection(def, definitionsByName) - if err != nil { - return nil, err - } - - colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) - if err != nil { - return nil, err - } - colID, err := colSeq.next(ctx) - if err != nil { - return nil, err - } - - fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(uint32(colID))) - if err != nil { - return nil, err - } - - desc.ID = uint32(colID) - desc.RootID = desc.ID - - schema, err = description.CreateSchemaVersion(ctx, txn, schema) - if err != nil { - return nil, err - } - desc.SchemaVersionID = schema.VersionID - for _, localField := range desc.Fields { - var fieldID uint64 - 
if localField.Name == request.DocIDFieldName { - // There is no hard technical requirement for this, we just think it looks nicer - // if the doc id is at the zero index. It makes it look a little nicer in commit - // queries too. - fieldID = 0 - } else { - fieldID, err = fieldSeq.next(ctx) - if err != nil { - return nil, err - } - } - - for i := range desc.Fields { - if desc.Fields[i].Name == localField.Name { - desc.Fields[i].ID = client.FieldID(fieldID) - break - } - } - } - - desc, err = description.SaveCollection(ctx, txn, desc) - if err != nil { - return nil, err - } - - col := db.newCollection(desc, schema) - - for _, index := range desc.Indexes { - if _, err := col.createIndex(ctx, index); err != nil { - return nil, err - } - } - - return db.getCollectionByID(ctx, desc.ID) -} - -// validateCollectionDefinitionPolicyDesc validates that the policy definition is valid, beyond syntax. -// -// Ensures that the information within the policy definition makes sense, -// this function might also make relevant remote calls using the acp system. -func (db *db) validateCollectionDefinitionPolicyDesc( - ctx context.Context, - policyDesc immutable.Option[client.PolicyDescription], -) error { - if !policyDesc.HasValue() { - // No policy validation needed, whether acp exists or not doesn't matter. - return nil - } - - // If there is a policy specified, but the database does not have - // acp enabled/available return an error, database must have an acp available - // to enable access control (inorder to adhere to the policy specified). - if !db.acp.HasValue() { - return ErrCanNotHavePolicyWithoutACP - } - - // If we have the policy specified on the collection, and acp is available/enabled, - // then using the acp system we need to ensure the policy id specified - // actually exists as a policy, and the resource name exists on that policy - // and that the resource is a valid DPI. 
- return db.acp.Value().ValidateResourceExistsOnValidDPI( - ctx, - policyDesc.Value().ID, - policyDesc.Value().ResourceName, - ) -} - -// updateSchema updates the persisted schema description matching the name of the given -// description, to the values in the given description. -// -// It will validate the given description using [validateUpdateSchema] before updating it. -// -// The schema (including the schema version ID) will only be updated if any changes have actually -// been made, if the given description matches the current persisted description then no changes will be -// applied. -func (db *db) updateSchema( - ctx context.Context, - existingSchemaByName map[string]client.SchemaDescription, - proposedDescriptionsByName map[string]client.SchemaDescription, - schema client.SchemaDescription, - migration immutable.Option[model.Lens], - setAsActiveVersion bool, -) error { - hasChanged, err := db.validateUpdateSchema( - existingSchemaByName, - proposedDescriptionsByName, - schema, - ) - if err != nil { - return err - } - - if !hasChanged { - return nil - } - - for _, field := range schema.Fields { - if field.Kind.IsObject() && !field.Kind.IsArray() { - idFieldName := field.Name + "_id" - if _, ok := schema.GetFieldByName(idFieldName); !ok { - schema.Fields = append(schema.Fields, client.SchemaFieldDescription{ - Name: idFieldName, - Kind: client.FieldKind_DocID, - }) - } - } - } - - for i, field := range schema.Fields { - if field.Typ == client.NONE_CRDT { - // If no CRDT Type has been provided, default to LWW_REGISTER. - field.Typ = client.LWW_REGISTER - schema.Fields[i] = field - } - } - - txn := mustGetContextTxn(ctx) - previousVersionID := schema.VersionID - schema, err = description.CreateSchemaVersion(ctx, txn, schema) - if err != nil { - return err - } - - // After creating the new schema version, we need to create new collection versions for - // any collection using the previous version. These will be inactive unless [setAsActiveVersion] - // is true. 
- - cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID) - if err != nil { - return err - } - - colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) - if err != nil { - return err - } - - for _, col := range cols { - previousID := col.ID - - existingCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schema.VersionID) - if err != nil { - return err - } - - // The collection version may exist before the schema version was created locally. This is - // because migrations for the globally known schema version may have been registered locally - // (typically to handle documents synced over P2P at higher versions) before the local schema - // was updated. We need to check for them now, and update them instead of creating new ones - // if they exist. - var isExistingCol bool - existingColLoop: - for _, existingCol := range existingCols { - sources := existingCol.CollectionSources() - for _, source := range sources { - // Make sure that this collection is the parent of the current [col], and not part of - // another collection set that happens to be using the same schema. - if source.SourceCollectionID == previousID { - if existingCol.RootID == client.OrphanRootID { - existingCol.RootID = col.RootID - } - - fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(existingCol.RootID)) - if err != nil { - return err - } - - for _, globalField := range schema.Fields { - var fieldID client.FieldID - // We must check the source collection if the field already exists, and take its ID - // from there, otherwise the field must be generated by the sequence. 
- existingField, ok := col.GetFieldByName(globalField.Name) - if ok { - fieldID = existingField.ID - } else { - nextFieldID, err := fieldSeq.next(ctx) - if err != nil { - return err - } - fieldID = client.FieldID(nextFieldID) - } - - existingCol.Fields = append( - existingCol.Fields, - client.CollectionFieldDescription{ - Name: globalField.Name, - ID: fieldID, - }, - ) - } - existingCol, err = description.SaveCollection(ctx, txn, existingCol) - if err != nil { - return err - } - isExistingCol = true - break existingColLoop - } - } - } - - if !isExistingCol { - colID, err := colSeq.next(ctx) - if err != nil { - return err - } - - fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(col.RootID)) - if err != nil { - return err - } - - // Create any new collections without a name (inactive), if [setAsActiveVersion] is true - // they will be activated later along with any existing collection versions. - col.Name = immutable.None[string]() - col.ID = uint32(colID) - col.SchemaVersionID = schema.VersionID - col.Sources = []any{ - &client.CollectionSource{ - SourceCollectionID: previousID, - Transform: migration, - }, - } - - for _, globalField := range schema.Fields { - _, exists := col.GetFieldByName(globalField.Name) - if !exists { - fieldID, err := fieldSeq.next(ctx) - if err != nil { - return err - } - - col.Fields = append( - col.Fields, - client.CollectionFieldDescription{ - Name: globalField.Name, - ID: client.FieldID(fieldID), - }, - ) - } - } - - _, err = description.SaveCollection(ctx, txn, col) - if err != nil { - return err - } - - if migration.HasValue() { - err = db.LensRegistry().SetMigration(ctx, col.ID, migration.Value()) - if err != nil { - return err - } - } - } - } - - if setAsActiveVersion { - // activate collection versions using the new schema ID. This call must be made after - // all new collection versions have been saved. 
- err = db.setActiveSchemaVersion(ctx, schema.VersionID) - if err != nil { - return err - } - } - - return nil -} - -// validateUpdateSchema validates that the given schema description is a valid update. -// -// Will return true if the given description differs from the current persisted state of the -// schema. Will return an error if it fails validation. -func (db *db) validateUpdateSchema( - existingDescriptionsByName map[string]client.SchemaDescription, - proposedDescriptionsByName map[string]client.SchemaDescription, - proposedDesc client.SchemaDescription, -) (bool, error) { - if proposedDesc.Name == "" { - return false, ErrSchemaNameEmpty - } - - existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name] - if !collectionExists { - return false, NewErrAddCollectionWithPatch(proposedDesc.Name) - } - - if proposedDesc.Root != existingDesc.Root { - return false, NewErrSchemaRootDoesntMatch( - proposedDesc.Name, - existingDesc.Root, - proposedDesc.Root, - ) - } - - if proposedDesc.Name != existingDesc.Name { - // There is actually little reason to not support this atm besides controlling the surface area - // of the new feature. Changing this should not break anything, but it should be tested first. - return false, NewErrCannotModifySchemaName(existingDesc.Name, proposedDesc.Name) - } - - if proposedDesc.VersionID != "" && proposedDesc.VersionID != existingDesc.VersionID { - // If users specify this it will be overwritten, an error is preferred to quietly ignoring it. 
- return false, ErrCannotSetVersionID - } - - hasChangedFields, err := validateUpdateSchemaFields(proposedDescriptionsByName, existingDesc, proposedDesc) - if err != nil { - return hasChangedFields, err - } - - return hasChangedFields, err -} - -func validateUpdateSchemaFields( - descriptionsByName map[string]client.SchemaDescription, - existingDesc client.SchemaDescription, - proposedDesc client.SchemaDescription, -) (bool, error) { - hasChanged := false - existingFieldsByName := map[string]client.SchemaFieldDescription{} - existingFieldIndexesByName := map[string]int{} - for i, field := range existingDesc.Fields { - existingFieldIndexesByName[field.Name] = i - existingFieldsByName[field.Name] = field - } - - newFieldNames := map[string]struct{}{} - for proposedIndex, proposedField := range proposedDesc.Fields { - existingField, fieldAlreadyExists := existingFieldsByName[proposedField.Name] - - // If the field is new, then the collection has changed - hasChanged = hasChanged || !fieldAlreadyExists - - if !fieldAlreadyExists && proposedField.Kind.IsObject() { - _, relatedDescFound := descriptionsByName[proposedField.Kind.Underlying()] - - if !relatedDescFound { - return false, NewErrFieldKindNotFound(proposedField.Name, proposedField.Kind.Underlying()) - } - - if proposedField.Kind.IsObject() && !proposedField.Kind.IsArray() { - idFieldName := proposedField.Name + request.RelatedObjectID - idField, idFieldFound := proposedDesc.GetFieldByName(idFieldName) - if idFieldFound { - if idField.Kind != client.FieldKind_DocID { - return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocID, idField.Kind) - } - } - } - } - - if proposedField.Kind.IsObjectArray() { - return false, NewErrSecondaryFieldOnSchema(proposedField.Name) - } - - if _, isDuplicate := newFieldNames[proposedField.Name]; isDuplicate { - return false, NewErrDuplicateField(proposedField.Name) - } - - if fieldAlreadyExists && proposedField != existingField { - return false, 
NewErrCannotMutateField(proposedField.Name) - } - - if existingIndex := existingFieldIndexesByName[proposedField.Name]; fieldAlreadyExists && - proposedIndex != existingIndex { - return false, NewErrCannotMoveField(proposedField.Name, proposedIndex, existingIndex) - } - - if !proposedField.Typ.IsSupportedFieldCType() { - return false, client.NewErrInvalidCRDTType(proposedField.Name, proposedField.Typ.String()) - } - - if !proposedField.Typ.IsCompatibleWith(proposedField.Kind) { - return false, client.NewErrCRDTKindMismatch(proposedField.Typ.String(), proposedField.Kind.String()) - } - - newFieldNames[proposedField.Name] = struct{}{} - } - - for _, field := range existingDesc.Fields { - if _, stillExists := newFieldNames[field.Name]; !stillExists { - return false, NewErrCannotDeleteField(field.Name) - } - } - return hasChanged, nil -} - -func (db *db) patchCollection( - ctx context.Context, - patchString string, -) error { - patch, err := jsonpatch.DecodePatch([]byte(patchString)) - if err != nil { - return err - } - txn := mustGetContextTxn(ctx) - cols, err := description.GetCollections(ctx, txn) - if err != nil { - return err - } - - existingColsByID := map[uint32]client.CollectionDescription{} - for _, col := range cols { - existingColsByID[col.ID] = col - } - - existingDescriptionJson, err := json.Marshal(existingColsByID) - if err != nil { - return err - } - - newDescriptionJson, err := patch.Apply(existingDescriptionJson) - if err != nil { - return err - } - - var newColsByID map[uint32]client.CollectionDescription - decoder := json.NewDecoder(strings.NewReader(string(newDescriptionJson))) - decoder.DisallowUnknownFields() - err = decoder.Decode(&newColsByID) - if err != nil { - return err - } - - err = db.validateCollectionChanges(existingColsByID, newColsByID) - if err != nil { - return err - } - - for _, col := range newColsByID { - _, err := description.SaveCollection(ctx, txn, col) - if err != nil { - return err - } - - existingCol, ok := 
existingColsByID[col.ID] - if ok { - // Clear any existing migrations in the registry, using this semi-hacky way - // to avoid adding more functions to a public interface that we wish to remove. - - for _, src := range existingCol.CollectionSources() { - if src.Transform.HasValue() { - err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{}) - if err != nil { - return err - } - } - } - for _, src := range existingCol.QuerySources() { - if src.Transform.HasValue() { - err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{}) - if err != nil { - return err - } - } - } - } - - for _, src := range col.CollectionSources() { - if src.Transform.HasValue() { - err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value()) - if err != nil { - return err - } - } - } - - for _, src := range col.QuerySources() { - if src.Transform.HasValue() { - err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value()) - if err != nil { - return err - } - } - } - } - - return db.loadSchema(ctx) -} - -var patchCollectionValidators = []func( - map[uint32]client.CollectionDescription, - map[uint32]client.CollectionDescription, -) error{ - validateCollectionNameUnique, - validateSingleVersionActive, - validateSourcesNotRedefined, - validateIndexesNotModified, - validateFieldsNotModified, - validatePolicyNotModified, - validateIDNotZero, - validateIDUnique, - validateIDExists, - validateRootIDNotMutated, - validateSchemaVersionIDNotMutated, - validateCollectionNotRemoved, -} - -func (db *db) validateCollectionChanges( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, validators := range patchCollectionValidators { - err := validators(oldColsByID, newColsByID) - if err != nil { - return err - } - } - - return nil -} - -var newCollectionValidators = []func( - client.CollectionDefinition, - map[string]client.CollectionDefinition, -) error{ - 
validateSecondaryFieldsPairUp, - validateRelationPointsToValidKind, - validateSingleSidePrimary, -} - -func (db *db) validateNewCollection( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, -) error { - for _, validators := range newCollectionValidators { - err := validators(def, defsByName) - if err != nil { - return err - } - } - - return nil -} - -func validateRelationPointsToValidKind( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, -) error { - for _, field := range def.Description.Fields { - if !field.Kind.HasValue() { - continue - } - - if !field.Kind.Value().IsObject() { - continue - } - - underlying := field.Kind.Value().Underlying() - _, ok := defsByName[underlying] - if !ok { - return NewErrFieldKindNotFound(field.Name, underlying) - } - } - - return nil -} - -func validateSecondaryFieldsPairUp( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, -) error { - for _, field := range def.Description.Fields { - if !field.Kind.HasValue() { - continue - } - - if !field.Kind.Value().IsObject() { - continue - } - - if !field.RelationName.HasValue() { - continue - } - - _, hasSchemaField := def.Schema.GetFieldByName(field.Name) - if hasSchemaField { - continue - } - - underlying := field.Kind.Value().Underlying() - otherDef, ok := defsByName[underlying] - if !ok { - continue - } - - if len(otherDef.Description.Fields) == 0 { - // Views/embedded objects do not require both sides of the relation to be defined. 
- continue - } - - otherField, ok := otherDef.Description.GetFieldByRelation( - field.RelationName.Value(), - def.GetName(), - field.Name, - ) - if !ok { - return NewErrRelationMissingField(underlying, field.RelationName.Value()) - } - - _, ok = otherDef.Schema.GetFieldByName(otherField.Name) - if !ok { - // This secondary is paired with another secondary, which is invalid - return NewErrRelationMissingField(underlying, field.RelationName.Value()) - } - } - - return nil -} - -func validateSingleSidePrimary( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, -) error { - for _, field := range def.Description.Fields { - if !field.Kind.HasValue() { - continue - } - - if !field.Kind.Value().IsObject() { - continue - } - - if !field.RelationName.HasValue() { - continue - } - - _, hasSchemaField := def.Schema.GetFieldByName(field.Name) - if !hasSchemaField { - // This is a secondary field and thus passes this rule - continue - } - - underlying := field.Kind.Value().Underlying() - otherDef, ok := defsByName[underlying] - if !ok { - continue - } - - otherField, ok := otherDef.Description.GetFieldByRelation( - field.RelationName.Value(), - def.GetName(), - field.Name, - ) - if !ok { - // This must be a one-sided relation, in which case it passes this rule - continue - } - - _, ok = otherDef.Schema.GetFieldByName(otherField.Name) - if ok { - // This primary is paired with another primary, which is invalid - return ErrMultipleRelationPrimaries - } - } - - return nil -} - -func validateCollectionNameUnique( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - names := map[string]struct{}{} - for _, col := range newColsByID { - if !col.Name.HasValue() { - continue - } - - if _, ok := names[col.Name.Value()]; ok { - return NewErrCollectionAlreadyExists(col.Name.Value()) - } - names[col.Name.Value()] = struct{}{} - } - - return nil -} - -func validateSingleVersionActive( - 
oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - rootsWithActiveCol := map[uint32]struct{}{} - for _, col := range newColsByID { - if !col.Name.HasValue() { - continue - } - - if _, ok := rootsWithActiveCol[col.RootID]; ok { - return NewErrMultipleActiveCollectionVersions(col.Name.Value(), col.RootID) - } - rootsWithActiveCol[col.RootID] = struct{}{} - } - - return nil -} - -// validateSourcesNotRedefined specifies the limitations on how the collection sources -// can be mutated. -// -// Currently new sources cannot be added, existing cannot be removed, and CollectionSources -// cannot be redirected to other collections. -func validateSourcesNotRedefined( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] - if !ok { - continue - } - - newColSources := newCol.CollectionSources() - oldColSources := oldCol.CollectionSources() - - if len(newColSources) != len(oldColSources) { - return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID) - } - - for i := range newColSources { - if newColSources[i].SourceCollectionID != oldColSources[i].SourceCollectionID { - return NewErrCollectionSourceIDMutated( - newCol.ID, - newColSources[i].SourceCollectionID, - oldColSources[i].SourceCollectionID, - ) - } - } - - newQuerySources := newCol.QuerySources() - oldQuerySources := oldCol.QuerySources() - - if len(newQuerySources) != len(oldQuerySources) { - return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID) - } - } - - return nil -} - -func validateIndexesNotModified( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] - if !ok { - continue - } - - // DeepEqual is temporary, as this validation is temporary - if 
!reflect.DeepEqual(oldCol.Indexes, newCol.Indexes) { - return NewErrCollectionIndexesCannotBeMutated(newCol.ID) - } - } - - return nil -} - -func validateFieldsNotModified( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] - if !ok { - continue - } - - // DeepEqual is temporary, as this validation is temporary - if !reflect.DeepEqual(oldCol.Fields, newCol.Fields) { - return NewErrCollectionFieldsCannotBeMutated(newCol.ID) - } - } - - return nil -} - -func validatePolicyNotModified( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] - if !ok { - continue - } - - // DeepEqual is temporary, as this validation is temporary - if !reflect.DeepEqual(oldCol.Policy, newCol.Policy) { - return NewErrCollectionPolicyCannotBeMutated(newCol.ID) - } - } - - return nil -} - -func validateIDNotZero( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - if newCol.ID == 0 { - return ErrCollectionIDCannotBeZero - } - } - - return nil -} - -func validateIDUnique( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - colIds := map[uint32]struct{}{} - for _, newCol := range newColsByID { - if _, ok := colIds[newCol.ID]; ok { - return NewErrCollectionIDAlreadyExists(newCol.ID) - } - colIds[newCol.ID] = struct{}{} - } - - return nil -} - -func validateIDExists( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - if _, ok := oldColsByID[newCol.ID]; !ok { - return NewErrAddCollectionIDWithPatch(newCol.ID) - } - } - - 
return nil -} - -func validateRootIDNotMutated( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] - if !ok { - continue - } - - if newCol.RootID != oldCol.RootID { - return NewErrCollectionRootIDCannotBeMutated(newCol.ID) - } - } - - return nil -} - -func validateSchemaVersionIDNotMutated( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] - if !ok { - continue - } - - if newCol.SchemaVersionID != oldCol.SchemaVersionID { - return NewErrCollectionSchemaVersionIDCannotBeMutated(newCol.ID) - } - } - - return nil -} - -func validateCollectionNotRemoved( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, -) error { -oldLoop: - for _, oldCol := range oldColsByID { - for _, newCol := range newColsByID { - // It is not enough to just match by the map index, in case the index does not pair - // up with the ID (this can happen if a user moves the collection within the map) - if newCol.ID == oldCol.ID { - continue oldLoop - } - } - - return NewErrCollectionsCannotBeDeleted(oldCol.ID) - } - - return nil -} - -// SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all -// those without it (if they share the same schema root). -// -// This will affect all operations interacting with the schema where a schema version is not explicitly -// provided. This includes GQL queries and Collection operations. -// -// It will return an error if the provided schema version ID does not exist. 
-func (db *db) setActiveSchemaVersion( - ctx context.Context, - schemaVersionID string, -) error { - if schemaVersionID == "" { - return ErrSchemaVersionIDEmpty - } - txn := mustGetContextTxn(ctx) - cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionID) - if err != nil { - return err - } - - schema, err := description.GetSchemaVersion(ctx, txn, schemaVersionID) - if err != nil { - return err - } - - colsWithRoot, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root) - if err != nil { - return err - } - - colsBySourceID := map[uint32][]client.CollectionDescription{} - colsByID := make(map[uint32]client.CollectionDescription, len(colsWithRoot)) - for _, col := range colsWithRoot { - colsByID[col.ID] = col - - sources := col.CollectionSources() - if len(sources) > 0 { - // For now, we assume that each collection can only have a single source. This will likely need - // to change later. - slice := colsBySourceID[sources[0].SourceCollectionID] - slice = append(slice, col) - colsBySourceID[sources[0].SourceCollectionID] = slice - } - } - - for _, col := range cols { - if col.Name.HasValue() { - // The collection is already active, so we can skip it and continue - continue - } - sources := col.CollectionSources() - - var activeCol client.CollectionDescription - var rootCol client.CollectionDescription - var isActiveFound bool - if len(sources) > 0 { - // For now, we assume that each collection can only have a single source. This will likely need - // to change later. - activeCol, rootCol, isActiveFound = db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID) - } - if !isActiveFound { - // We need to look both down and up for the active version - the most recent is not necessarily the active one. 
- activeCol, isActiveFound = db.getActiveCollectionUp(ctx, colsBySourceID, rootCol.ID) - } - - var newName string - if isActiveFound { - newName = activeCol.Name.Value() - } else { - // If there are no active versions in the collection set, take the name of the schema to be the name of the - // collection. - newName = schema.Name - } - col.Name = immutable.Some(newName) - - _, err = description.SaveCollection(ctx, txn, col) - if err != nil { - return err - } - - if isActiveFound { - // Deactivate the currently active collection by setting its name to none. - activeCol.Name = immutable.None[string]() - _, err = description.SaveCollection(ctx, txn, activeCol) - if err != nil { - return err - } - } - } - - // Load the schema into the clients (e.g. GQL) - return db.loadSchema(ctx) -} - -func (db *db) getActiveCollectionDown( - ctx context.Context, - colsByID map[uint32]client.CollectionDescription, - id uint32, -) (client.CollectionDescription, client.CollectionDescription, bool) { - col, ok := colsByID[id] - if !ok { - return client.CollectionDescription{}, client.CollectionDescription{}, false - } - - if col.Name.HasValue() { - return col, client.CollectionDescription{}, true - } - - sources := col.CollectionSources() - if len(sources) == 0 { - // If a collection has zero sources it is likely the initial collection version, or - // this collection set is currently orphaned (can happen when setting migrations that - // do not yet link all the way back to a non-orphaned set) - return client.CollectionDescription{}, col, false - } - - // For now, we assume that each collection can only have a single source. This will likely need - // to change later. 
- return db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID) -} - -func (db *db) getActiveCollectionUp( - ctx context.Context, - colsBySourceID map[uint32][]client.CollectionDescription, - id uint32, -) (client.CollectionDescription, bool) { - cols, ok := colsBySourceID[id] - if !ok { - // We have reached the top of the set, and have not found an active collection - return client.CollectionDescription{}, false - } - - for _, col := range cols { - if col.Name.HasValue() { - return col, true - } - activeCol, isFound := db.getActiveCollectionUp(ctx, colsBySourceID, col.ID) - if isFound { - return activeCol, isFound - } - } - - return client.CollectionDescription{}, false -} - func (db *db) getCollectionByID(ctx context.Context, id uint32) (client.Collection, error) { txn := mustGetContextTxn(ctx) diff --git a/internal/db/collection_define.go b/internal/db/collection_define.go new file mode 100644 index 0000000000..4712911399 --- /dev/null +++ b/internal/db/collection_define.go @@ -0,0 +1,373 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package db + +import ( + "context" + "encoding/json" + "strings" + + jsonpatch "github.com/evanphx/json-patch/v5" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/db/description" +) + +func (db *db) createCollection( + ctx context.Context, + def client.CollectionDefinition, + newDefinitions []client.CollectionDefinition, +) (client.Collection, error) { + schema := def.Schema + desc := def.Description + txn := mustGetContextTxn(ctx) + + if desc.Name.HasValue() { + exists, err := description.HasCollectionByName(ctx, txn, desc.Name.Value()) + if err != nil { + return nil, err + } + if exists { + return nil, ErrCollectionAlreadyExists + } + } + + existingDefinitions, err := db.getAllActiveDefinitions(ctx) + if err != nil { + return nil, err + } + + schemaByName := map[string]client.SchemaDescription{} + for _, existingDefinition := range existingDefinitions { + schemaByName[existingDefinition.Schema.Name] = existingDefinition.Schema + } + for _, newDefinition := range newDefinitions { + schemaByName[newDefinition.Schema.Name] = newDefinition.Schema + } + + _, err = validateUpdateSchemaFields(schemaByName, client.SchemaDescription{}, schema) + if err != nil { + return nil, err + } + + definitionsByName := map[string]client.CollectionDefinition{} + for _, existingDefinition := range existingDefinitions { + definitionsByName[existingDefinition.GetName()] = existingDefinition + } + for _, newDefinition := range newDefinitions { + definitionsByName[newDefinition.GetName()] = newDefinition + } + err = db.validateNewCollection(def, definitionsByName) + if err != nil { + return nil, err + } + + colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) + if err != nil { + return nil, err + } + colID, err := colSeq.next(ctx) + if err != 
nil { + return nil, err + } + + fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(uint32(colID))) + if err != nil { + return nil, err + } + + desc.ID = uint32(colID) + desc.RootID = desc.ID + + schema, err = description.CreateSchemaVersion(ctx, txn, schema) + if err != nil { + return nil, err + } + desc.SchemaVersionID = schema.VersionID + for _, localField := range desc.Fields { + var fieldID uint64 + if localField.Name == request.DocIDFieldName { + // There is no hard technical requirement for this, we just think it looks nicer + // if the doc id is at the zero index. It makes it look a little nicer in commit + // queries too. + fieldID = 0 + } else { + fieldID, err = fieldSeq.next(ctx) + if err != nil { + return nil, err + } + } + + for i := range desc.Fields { + if desc.Fields[i].Name == localField.Name { + desc.Fields[i].ID = client.FieldID(fieldID) + break + } + } + } + + desc, err = description.SaveCollection(ctx, txn, desc) + if err != nil { + return nil, err + } + + col := db.newCollection(desc, schema) + + for _, index := range desc.Indexes { + if _, err := col.createIndex(ctx, index); err != nil { + return nil, err + } + } + + return db.getCollectionByID(ctx, desc.ID) +} + +func (db *db) patchCollection( + ctx context.Context, + patchString string, +) error { + patch, err := jsonpatch.DecodePatch([]byte(patchString)) + if err != nil { + return err + } + txn := mustGetContextTxn(ctx) + cols, err := description.GetCollections(ctx, txn) + if err != nil { + return err + } + + existingColsByID := map[uint32]client.CollectionDescription{} + for _, col := range cols { + existingColsByID[col.ID] = col + } + + existingDescriptionJson, err := json.Marshal(existingColsByID) + if err != nil { + return err + } + + newDescriptionJson, err := patch.Apply(existingDescriptionJson) + if err != nil { + return err + } + + var newColsByID map[uint32]client.CollectionDescription + decoder := json.NewDecoder(strings.NewReader(string(newDescriptionJson))) + 
decoder.DisallowUnknownFields() + err = decoder.Decode(&newColsByID) + if err != nil { + return err + } + + err = db.validateCollectionChanges(existingColsByID, newColsByID) + if err != nil { + return err + } + + for _, col := range newColsByID { + _, err := description.SaveCollection(ctx, txn, col) + if err != nil { + return err + } + + existingCol, ok := existingColsByID[col.ID] + if ok { + // Clear any existing migrations in the registry, using this semi-hacky way + // to avoid adding more functions to a public interface that we wish to remove. + + for _, src := range existingCol.CollectionSources() { + if src.Transform.HasValue() { + err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{}) + if err != nil { + return err + } + } + } + for _, src := range existingCol.QuerySources() { + if src.Transform.HasValue() { + err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{}) + if err != nil { + return err + } + } + } + } + + for _, src := range col.CollectionSources() { + if src.Transform.HasValue() { + err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value()) + if err != nil { + return err + } + } + } + + for _, src := range col.QuerySources() { + if src.Transform.HasValue() { + err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value()) + if err != nil { + return err + } + } + } + } + + return db.loadSchema(ctx) +} + +// SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all +// those without it (if they share the same schema root). +// +// This will affect all operations interacting with the schema where a schema version is not explicitly +// provided. This includes GQL queries and Collection operations. +// +// It will return an error if the provided schema version ID does not exist. 
+func (db *db) setActiveSchemaVersion( + ctx context.Context, + schemaVersionID string, +) error { + if schemaVersionID == "" { + return ErrSchemaVersionIDEmpty + } + txn := mustGetContextTxn(ctx) + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionID) + if err != nil { + return err + } + + schema, err := description.GetSchemaVersion(ctx, txn, schemaVersionID) + if err != nil { + return err + } + + colsWithRoot, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root) + if err != nil { + return err + } + + colsBySourceID := map[uint32][]client.CollectionDescription{} + colsByID := make(map[uint32]client.CollectionDescription, len(colsWithRoot)) + for _, col := range colsWithRoot { + colsByID[col.ID] = col + + sources := col.CollectionSources() + if len(sources) > 0 { + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. + slice := colsBySourceID[sources[0].SourceCollectionID] + slice = append(slice, col) + colsBySourceID[sources[0].SourceCollectionID] = slice + } + } + + for _, col := range cols { + if col.Name.HasValue() { + // The collection is already active, so we can skip it and continue + continue + } + sources := col.CollectionSources() + + var activeCol client.CollectionDescription + var rootCol client.CollectionDescription + var isActiveFound bool + if len(sources) > 0 { + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. + activeCol, rootCol, isActiveFound = db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID) + } + if !isActiveFound { + // We need to look both down and up for the active version - the most recent is not necessarily the active one. 
+ activeCol, isActiveFound = db.getActiveCollectionUp(ctx, colsBySourceID, rootCol.ID) + } + + var newName string + if isActiveFound { + newName = activeCol.Name.Value() + } else { + // If there are no active versions in the collection set, take the name of the schema to be the name of the + // collection. + newName = schema.Name + } + col.Name = immutable.Some(newName) + + _, err = description.SaveCollection(ctx, txn, col) + if err != nil { + return err + } + + if isActiveFound { + // Deactivate the currently active collection by setting its name to none. + activeCol.Name = immutable.None[string]() + _, err = description.SaveCollection(ctx, txn, activeCol) + if err != nil { + return err + } + } + } + + // Load the schema into the clients (e.g. GQL) + return db.loadSchema(ctx) +} + +func (db *db) getActiveCollectionDown( + ctx context.Context, + colsByID map[uint32]client.CollectionDescription, + id uint32, +) (client.CollectionDescription, client.CollectionDescription, bool) { + col, ok := colsByID[id] + if !ok { + return client.CollectionDescription{}, client.CollectionDescription{}, false + } + + if col.Name.HasValue() { + return col, client.CollectionDescription{}, true + } + + sources := col.CollectionSources() + if len(sources) == 0 { + // If a collection has zero sources it is likely the initial collection version, or + // this collection set is currently orphaned (can happen when setting migrations that + // do not yet link all the way back to a non-orphaned set) + return client.CollectionDescription{}, col, false + } + + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. 
+ return db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID) +} + +func (db *db) getActiveCollectionUp( + ctx context.Context, + colsBySourceID map[uint32][]client.CollectionDescription, + id uint32, +) (client.CollectionDescription, bool) { + cols, ok := colsBySourceID[id] + if !ok { + // We have reached the top of the set, and have not found an active collection + return client.CollectionDescription{}, false + } + + for _, col := range cols { + if col.Name.HasValue() { + return col, true + } + activeCol, isFound := db.getActiveCollectionUp(ctx, colsBySourceID, col.ID) + if isFound { + return activeCol, isFound + } + } + + return client.CollectionDescription{}, false +} diff --git a/internal/db/definition_validation.go b/internal/db/definition_validation.go new file mode 100644 index 0000000000..988ebeb15c --- /dev/null +++ b/internal/db/definition_validation.go @@ -0,0 +1,585 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package db + +import ( + "context" + "reflect" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" +) + +var patchCollectionValidators = []func( + map[uint32]client.CollectionDescription, + map[uint32]client.CollectionDescription, +) error{ + validateCollectionNameUnique, + validateSingleVersionActive, + validateSourcesNotRedefined, + validateIndexesNotModified, + validateFieldsNotModified, + validatePolicyNotModified, + validateIDNotZero, + validateIDUnique, + validateIDExists, + validateRootIDNotMutated, + validateSchemaVersionIDNotMutated, + validateCollectionNotRemoved, +} + +var newCollectionValidators = []func( + client.CollectionDefinition, + map[string]client.CollectionDefinition, +) error{ + validateSecondaryFieldsPairUp, + validateRelationPointsToValidKind, + validateSingleSidePrimary, +} + +func (db *db) validateCollectionChanges( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, validators := range patchCollectionValidators { + err := validators(oldColsByID, newColsByID) + if err != nil { + return err + } + } + + return nil +} + +func (db *db) validateNewCollection( + def client.CollectionDefinition, + defsByName map[string]client.CollectionDefinition, +) error { + for _, validators := range newCollectionValidators { + err := validators(def, defsByName) + if err != nil { + return err + } + } + + return nil +} + +func validateRelationPointsToValidKind( + def client.CollectionDefinition, + defsByName map[string]client.CollectionDefinition, +) error { + for _, field := range def.Description.Fields { + if !field.Kind.HasValue() { + continue + } + + if !field.Kind.Value().IsObject() { + continue + } + + underlying := field.Kind.Value().Underlying() + _, ok := defsByName[underlying] + if !ok { + return NewErrFieldKindNotFound(field.Name, underlying) + } + } + + return nil +} + +func 
validateSecondaryFieldsPairUp( + def client.CollectionDefinition, + defsByName map[string]client.CollectionDefinition, +) error { + for _, field := range def.Description.Fields { + if !field.Kind.HasValue() { + continue + } + + if !field.Kind.Value().IsObject() { + continue + } + + if !field.RelationName.HasValue() { + continue + } + + _, hasSchemaField := def.Schema.GetFieldByName(field.Name) + if hasSchemaField { + continue + } + + underlying := field.Kind.Value().Underlying() + otherDef, ok := defsByName[underlying] + if !ok { + continue + } + + if len(otherDef.Description.Fields) == 0 { + // Views/embedded objects do not require both sides of the relation to be defined. + continue + } + + otherField, ok := otherDef.Description.GetFieldByRelation( + field.RelationName.Value(), + def.GetName(), + field.Name, + ) + if !ok { + return NewErrRelationMissingField(underlying, field.RelationName.Value()) + } + + _, ok = otherDef.Schema.GetFieldByName(otherField.Name) + if !ok { + // This secondary is paired with another secondary, which is invalid + return NewErrRelationMissingField(underlying, field.RelationName.Value()) + } + } + + return nil +} + +func validateSingleSidePrimary( + def client.CollectionDefinition, + defsByName map[string]client.CollectionDefinition, +) error { + for _, field := range def.Description.Fields { + if !field.Kind.HasValue() { + continue + } + + if !field.Kind.Value().IsObject() { + continue + } + + if !field.RelationName.HasValue() { + continue + } + + _, hasSchemaField := def.Schema.GetFieldByName(field.Name) + if !hasSchemaField { + // This is a secondary field and thus passes this rule + continue + } + + underlying := field.Kind.Value().Underlying() + otherDef, ok := defsByName[underlying] + if !ok { + continue + } + + otherField, ok := otherDef.Description.GetFieldByRelation( + field.RelationName.Value(), + def.GetName(), + field.Name, + ) + if !ok { + // This must be a one-sided relation, in which case it passes this rule + continue + 
} + + _, ok = otherDef.Schema.GetFieldByName(otherField.Name) + if ok { + // This primary is paired with another primary, which is invalid + return ErrMultipleRelationPrimaries + } + } + + return nil +} + +func validateCollectionNameUnique( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + names := map[string]struct{}{} + for _, col := range newColsByID { + if !col.Name.HasValue() { + continue + } + + if _, ok := names[col.Name.Value()]; ok { + return NewErrCollectionAlreadyExists(col.Name.Value()) + } + names[col.Name.Value()] = struct{}{} + } + + return nil +} + +func validateSingleVersionActive( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + rootsWithActiveCol := map[uint32]struct{}{} + for _, col := range newColsByID { + if !col.Name.HasValue() { + continue + } + + if _, ok := rootsWithActiveCol[col.RootID]; ok { + return NewErrMultipleActiveCollectionVersions(col.Name.Value(), col.RootID) + } + rootsWithActiveCol[col.RootID] = struct{}{} + } + + return nil +} + +// validateSourcesNotRedefined specifies the limitations on how the collection sources +// can be mutated. +// +// Currently new sources cannot be added, existing cannot be removed, and CollectionSources +// cannot be redirected to other collections. 
+func validateSourcesNotRedefined( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + oldCol, ok := oldColsByID[newCol.ID] + if !ok { + continue + } + + newColSources := newCol.CollectionSources() + oldColSources := oldCol.CollectionSources() + + if len(newColSources) != len(oldColSources) { + return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID) + } + + for i := range newColSources { + if newColSources[i].SourceCollectionID != oldColSources[i].SourceCollectionID { + return NewErrCollectionSourceIDMutated( + newCol.ID, + newColSources[i].SourceCollectionID, + oldColSources[i].SourceCollectionID, + ) + } + } + + newQuerySources := newCol.QuerySources() + oldQuerySources := oldCol.QuerySources() + + if len(newQuerySources) != len(oldQuerySources) { + return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID) + } + } + + return nil +} + +func validateIndexesNotModified( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + oldCol, ok := oldColsByID[newCol.ID] + if !ok { + continue + } + + // DeepEqual is temporary, as this validation is temporary + if !reflect.DeepEqual(oldCol.Indexes, newCol.Indexes) { + return NewErrCollectionIndexesCannotBeMutated(newCol.ID) + } + } + + return nil +} + +func validateFieldsNotModified( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + oldCol, ok := oldColsByID[newCol.ID] + if !ok { + continue + } + + // DeepEqual is temporary, as this validation is temporary + if !reflect.DeepEqual(oldCol.Fields, newCol.Fields) { + return NewErrCollectionFieldsCannotBeMutated(newCol.ID) + } + } + + return nil +} + +func validatePolicyNotModified( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID 
map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + oldCol, ok := oldColsByID[newCol.ID] + if !ok { + continue + } + + // DeepEqual is temporary, as this validation is temporary + if !reflect.DeepEqual(oldCol.Policy, newCol.Policy) { + return NewErrCollectionPolicyCannotBeMutated(newCol.ID) + } + } + + return nil +} + +func validateIDNotZero( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + if newCol.ID == 0 { + return ErrCollectionIDCannotBeZero + } + } + + return nil +} + +func validateIDUnique( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + colIds := map[uint32]struct{}{} + for _, newCol := range newColsByID { + if _, ok := colIds[newCol.ID]; ok { + return NewErrCollectionIDAlreadyExists(newCol.ID) + } + colIds[newCol.ID] = struct{}{} + } + + return nil +} + +func validateIDExists( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + if _, ok := oldColsByID[newCol.ID]; !ok { + return NewErrAddCollectionIDWithPatch(newCol.ID) + } + } + + return nil +} + +func validateRootIDNotMutated( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + oldCol, ok := oldColsByID[newCol.ID] + if !ok { + continue + } + + if newCol.RootID != oldCol.RootID { + return NewErrCollectionRootIDCannotBeMutated(newCol.ID) + } + } + + return nil +} + +func validateSchemaVersionIDNotMutated( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { + for _, newCol := range newColsByID { + oldCol, ok := oldColsByID[newCol.ID] + if !ok { + continue + } + + if newCol.SchemaVersionID != 
oldCol.SchemaVersionID { + return NewErrCollectionSchemaVersionIDCannotBeMutated(newCol.ID) + } + } + + return nil +} + +func validateCollectionNotRemoved( + oldColsByID map[uint32]client.CollectionDescription, + newColsByID map[uint32]client.CollectionDescription, +) error { +oldLoop: + for _, oldCol := range oldColsByID { + for _, newCol := range newColsByID { + // It is not enough to just match by the map index, in case the index does not pair + // up with the ID (this can happen if a user moves the collection within the map) + if newCol.ID == oldCol.ID { + continue oldLoop + } + } + + return NewErrCollectionsCannotBeDeleted(oldCol.ID) + } + + return nil +} + +// validateCollectionDefinitionPolicyDesc validates that the policy definition is valid, beyond syntax. +// +// Ensures that the information within the policy definition makes sense, +// this function might also make relevant remote calls using the acp system. +func (db *db) validateCollectionDefinitionPolicyDesc( + ctx context.Context, + policyDesc immutable.Option[client.PolicyDescription], +) error { + if !policyDesc.HasValue() { + // No policy validation needed, whether acp exists or not doesn't matter. + return nil + } + + // If there is a policy specified, but the database does not have + // acp enabled/available return an error, database must have an acp available + // to enable access control (inorder to adhere to the policy specified). + if !db.acp.HasValue() { + return ErrCanNotHavePolicyWithoutACP + } + + // If we have the policy specified on the collection, and acp is available/enabled, + // then using the acp system we need to ensure the policy id specified + // actually exists as a policy, and the resource name exists on that policy + // and that the resource is a valid DPI. + return db.acp.Value().ValidateResourceExistsOnValidDPI( + ctx, + policyDesc.Value().ID, + policyDesc.Value().ResourceName, + ) +} + +// validateUpdateSchema validates that the given schema description is a valid update. 
+// +// Will return true if the given description differs from the current persisted state of the +// schema. Will return an error if it fails validation. +func (db *db) validateUpdateSchema( + existingDescriptionsByName map[string]client.SchemaDescription, + proposedDescriptionsByName map[string]client.SchemaDescription, + proposedDesc client.SchemaDescription, +) (bool, error) { + if proposedDesc.Name == "" { + return false, ErrSchemaNameEmpty + } + + existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name] + if !collectionExists { + return false, NewErrAddCollectionWithPatch(proposedDesc.Name) + } + + if proposedDesc.Root != existingDesc.Root { + return false, NewErrSchemaRootDoesntMatch( + proposedDesc.Name, + existingDesc.Root, + proposedDesc.Root, + ) + } + + if proposedDesc.Name != existingDesc.Name { + // There is actually little reason to not support this atm besides controlling the surface area + // of the new feature. Changing this should not break anything, but it should be tested first. + return false, NewErrCannotModifySchemaName(existingDesc.Name, proposedDesc.Name) + } + + if proposedDesc.VersionID != "" && proposedDesc.VersionID != existingDesc.VersionID { + // If users specify this it will be overwritten, an error is preferred to quietly ignoring it. 
+ return false, ErrCannotSetVersionID + } + + hasChangedFields, err := validateUpdateSchemaFields(proposedDescriptionsByName, existingDesc, proposedDesc) + if err != nil { + return hasChangedFields, err + } + + return hasChangedFields, err +} + +func validateUpdateSchemaFields( + descriptionsByName map[string]client.SchemaDescription, + existingDesc client.SchemaDescription, + proposedDesc client.SchemaDescription, +) (bool, error) { + hasChanged := false + existingFieldsByName := map[string]client.SchemaFieldDescription{} + existingFieldIndexesByName := map[string]int{} + for i, field := range existingDesc.Fields { + existingFieldIndexesByName[field.Name] = i + existingFieldsByName[field.Name] = field + } + + newFieldNames := map[string]struct{}{} + for proposedIndex, proposedField := range proposedDesc.Fields { + existingField, fieldAlreadyExists := existingFieldsByName[proposedField.Name] + + // If the field is new, then the collection has changed + hasChanged = hasChanged || !fieldAlreadyExists + + if !fieldAlreadyExists && proposedField.Kind.IsObject() { + _, relatedDescFound := descriptionsByName[proposedField.Kind.Underlying()] + + if !relatedDescFound { + return false, NewErrFieldKindNotFound(proposedField.Name, proposedField.Kind.Underlying()) + } + + if proposedField.Kind.IsObject() && !proposedField.Kind.IsArray() { + idFieldName := proposedField.Name + request.RelatedObjectID + idField, idFieldFound := proposedDesc.GetFieldByName(idFieldName) + if idFieldFound { + if idField.Kind != client.FieldKind_DocID { + return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocID, idField.Kind) + } + } + } + } + + if proposedField.Kind.IsObjectArray() { + return false, NewErrSecondaryFieldOnSchema(proposedField.Name) + } + + if _, isDuplicate := newFieldNames[proposedField.Name]; isDuplicate { + return false, NewErrDuplicateField(proposedField.Name) + } + + if fieldAlreadyExists && proposedField != existingField { + return false, 
NewErrCannotMutateField(proposedField.Name) + } + + if existingIndex := existingFieldIndexesByName[proposedField.Name]; fieldAlreadyExists && + proposedIndex != existingIndex { + return false, NewErrCannotMoveField(proposedField.Name, proposedIndex, existingIndex) + } + + if !proposedField.Typ.IsSupportedFieldCType() { + return false, client.NewErrInvalidCRDTType(proposedField.Name, proposedField.Typ.String()) + } + + if !proposedField.Typ.IsCompatibleWith(proposedField.Kind) { + return false, client.NewErrCRDTKindMismatch(proposedField.Typ.String(), proposedField.Kind.String()) + } + + newFieldNames[proposedField.Name] = struct{}{} + } + + for _, field := range existingDesc.Fields { + if _, stillExists := newFieldNames[field.Name]; !stillExists { + return false, NewErrCannotDeleteField(field.Name) + } + } + return hasChanged, nil +} diff --git a/internal/db/schema.go b/internal/db/schema.go index eca05f2a1f..8c0ba074dc 100644 --- a/internal/db/schema.go +++ b/internal/db/schema.go @@ -23,6 +23,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/description" ) @@ -323,3 +324,203 @@ func containsLetter(s string) bool { } return false } + +// updateSchema updates the persisted schema description matching the name of the given +// description, to the values in the given description. +// +// It will validate the given description using [validateUpdateSchema] before updating it. +// +// The schema (including the schema version ID) will only be updated if any changes have actually +// been made, if the given description matches the current persisted description then no changes will be +// applied. 
+func (db *db) updateSchema( + ctx context.Context, + existingSchemaByName map[string]client.SchemaDescription, + proposedDescriptionsByName map[string]client.SchemaDescription, + schema client.SchemaDescription, + migration immutable.Option[model.Lens], + setAsActiveVersion bool, +) error { + hasChanged, err := db.validateUpdateSchema( + existingSchemaByName, + proposedDescriptionsByName, + schema, + ) + if err != nil { + return err + } + + if !hasChanged { + return nil + } + + for _, field := range schema.Fields { + if field.Kind.IsObject() && !field.Kind.IsArray() { + idFieldName := field.Name + "_id" + if _, ok := schema.GetFieldByName(idFieldName); !ok { + schema.Fields = append(schema.Fields, client.SchemaFieldDescription{ + Name: idFieldName, + Kind: client.FieldKind_DocID, + }) + } + } + } + + for i, field := range schema.Fields { + if field.Typ == client.NONE_CRDT { + // If no CRDT Type has been provided, default to LWW_REGISTER. + field.Typ = client.LWW_REGISTER + schema.Fields[i] = field + } + } + + txn := mustGetContextTxn(ctx) + previousVersionID := schema.VersionID + schema, err = description.CreateSchemaVersion(ctx, txn, schema) + if err != nil { + return err + } + + // After creating the new schema version, we need to create new collection versions for + // any collection using the previous version. These will be inactive unless [setAsActiveVersion] + // is true. + + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID) + if err != nil { + return err + } + + colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) + if err != nil { + return err + } + + for _, col := range cols { + previousID := col.ID + + existingCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schema.VersionID) + if err != nil { + return err + } + + // The collection version may exist before the schema version was created locally. 
This is + // because migrations for the globally known schema version may have been registered locally + // (typically to handle documents synced over P2P at higher versions) before the local schema + // was updated. We need to check for them now, and update them instead of creating new ones + // if they exist. + var isExistingCol bool + existingColLoop: + for _, existingCol := range existingCols { + sources := existingCol.CollectionSources() + for _, source := range sources { + // Make sure that this collection is the parent of the current [col], and not part of + // another collection set that happens to be using the same schema. + if source.SourceCollectionID == previousID { + if existingCol.RootID == client.OrphanRootID { + existingCol.RootID = col.RootID + } + + fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(existingCol.RootID)) + if err != nil { + return err + } + + for _, globalField := range schema.Fields { + var fieldID client.FieldID + // We must check the source collection if the field already exists, and take its ID + // from there, otherwise the field must be generated by the sequence. 
+ existingField, ok := col.GetFieldByName(globalField.Name) + if ok { + fieldID = existingField.ID + } else { + nextFieldID, err := fieldSeq.next(ctx) + if err != nil { + return err + } + fieldID = client.FieldID(nextFieldID) + } + + existingCol.Fields = append( + existingCol.Fields, + client.CollectionFieldDescription{ + Name: globalField.Name, + ID: fieldID, + }, + ) + } + existingCol, err = description.SaveCollection(ctx, txn, existingCol) + if err != nil { + return err + } + isExistingCol = true + break existingColLoop + } + } + } + + if !isExistingCol { + colID, err := colSeq.next(ctx) + if err != nil { + return err + } + + fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(col.RootID)) + if err != nil { + return err + } + + // Create any new collections without a name (inactive), if [setAsActiveVersion] is true + // they will be activated later along with any existing collection versions. + col.Name = immutable.None[string]() + col.ID = uint32(colID) + col.SchemaVersionID = schema.VersionID + col.Sources = []any{ + &client.CollectionSource{ + SourceCollectionID: previousID, + Transform: migration, + }, + } + + for _, globalField := range schema.Fields { + _, exists := col.GetFieldByName(globalField.Name) + if !exists { + fieldID, err := fieldSeq.next(ctx) + if err != nil { + return err + } + + col.Fields = append( + col.Fields, + client.CollectionFieldDescription{ + Name: globalField.Name, + ID: client.FieldID(fieldID), + }, + ) + } + } + + _, err = description.SaveCollection(ctx, txn, col) + if err != nil { + return err + } + + if migration.HasValue() { + err = db.LensRegistry().SetMigration(ctx, col.ID, migration.Value()) + if err != nil { + return err + } + } + } + } + + if setAsActiveVersion { + // activate collection versions using the new schema ID. This call must be made after + // all new collection versions have been saved. 
+ err = db.setActiveSchemaVersion(ctx, schema.VersionID) + if err != nil { + return err + } + } + + return nil +} From d15b3b31713291ba198a49b90c87f3c6457adf24 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Mon, 10 Jun 2024 16:34:50 -0700 Subject: [PATCH 43/78] feat: Add async transaction callbacks (#2708) ## Relevant issue(s) Resolves #2707 ## Description This PR adds async versions for all transaction callbacks. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test` Specify the platform(s) on which this was tested: - MacOS --- datastore/concurrent_txn.go | 31 ++----- datastore/mocks/txn.go | 99 +++++++++++++++++++++++ datastore/txn.go | 125 ++++++++++++++++------------- datastore/txn_test.go | 87 +++++++++++++++++++- http/client_tx.go | 12 +++ tests/bench/query/planner/utils.go | 3 + tests/clients/cli/wrapper_tx.go | 12 +++ tests/clients/http/wrapper_tx.go | 12 +++ 8 files changed, 296 insertions(+), 85 deletions(-) diff --git a/datastore/concurrent_txn.go b/datastore/concurrent_txn.go index f46637e99d..409a26223c 100644 --- a/datastore/concurrent_txn.go +++ b/datastore/concurrent_txn.go @@ -15,8 +15,6 @@ import ( "sync" ds "github.com/ipfs/go-datastore" - - "github.com/sourcenetwork/defradb/datastore/iterable" ) type concurrentTxn struct { @@ -32,31 +30,16 @@ type concurrentTxn struct { // NewConcurrentTxnFrom creates a new Txn from rootstore that supports concurrent API calls func NewConcurrentTxnFrom(ctx context.Context, rootstore 
ds.TxnDatastore, id uint64, readonly bool) (Txn, error) { - var rootTxn ds.Txn - var err error - - // check if our datastore natively supports iterable transaction, transactions or batching - if iterableTxnStore, ok := rootstore.(iterable.IterableTxnDatastore); ok { - rootTxn, err = iterableTxnStore.NewIterableTransaction(ctx, readonly) - if err != nil { - return nil, err - } - } else { - rootTxn, err = rootstore.NewTransaction(ctx, readonly) - if err != nil { - return nil, err - } + rootTxn, err := newTxnFrom(ctx, rootstore, readonly) + if err != nil { + return nil, err } - rootConcurentTxn := &concurrentTxn{Txn: rootTxn} multistore := MultiStoreFrom(rootConcurentTxn) return &txn{ - rootConcurentTxn, - multistore, - id, - []func(){}, - []func(){}, - []func(){}, + t: rootConcurentTxn, + MultiStore: multistore, + id: id, }, nil } @@ -90,7 +73,7 @@ func (t *concurrentTxn) Put(ctx context.Context, key ds.Key, value []byte) error // Sync executes the transaction. func (t *concurrentTxn) Sync(ctx context.Context, prefix ds.Key) error { - return t.Txn.Commit(ctx) + return t.Commit(ctx) } // Close discards the transaction. 
diff --git a/datastore/mocks/txn.go b/datastore/mocks/txn.go index 0dc71cb46f..711464dc12 100644 --- a/datastore/mocks/txn.go +++ b/datastore/mocks/txn.go @@ -300,6 +300,39 @@ func (_c *Txn_OnDiscard_Call) RunAndReturn(run func(func())) *Txn_OnDiscard_Call return _c } +// OnDiscardAsync provides a mock function with given fields: fn +func (_m *Txn) OnDiscardAsync(fn func()) { + _m.Called(fn) +} + +// Txn_OnDiscardAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnDiscardAsync' +type Txn_OnDiscardAsync_Call struct { + *mock.Call +} + +// OnDiscardAsync is a helper method to define mock.On call +// - fn func() +func (_e *Txn_Expecter) OnDiscardAsync(fn interface{}) *Txn_OnDiscardAsync_Call { + return &Txn_OnDiscardAsync_Call{Call: _e.mock.On("OnDiscardAsync", fn)} +} + +func (_c *Txn_OnDiscardAsync_Call) Run(run func(fn func())) *Txn_OnDiscardAsync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func())) + }) + return _c +} + +func (_c *Txn_OnDiscardAsync_Call) Return() *Txn_OnDiscardAsync_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_OnDiscardAsync_Call) RunAndReturn(run func(func())) *Txn_OnDiscardAsync_Call { + _c.Call.Return(run) + return _c +} + // OnError provides a mock function with given fields: fn func (_m *Txn) OnError(fn func()) { _m.Called(fn) @@ -333,6 +366,39 @@ func (_c *Txn_OnError_Call) RunAndReturn(run func(func())) *Txn_OnError_Call { return _c } +// OnErrorAsync provides a mock function with given fields: fn +func (_m *Txn) OnErrorAsync(fn func()) { + _m.Called(fn) +} + +// Txn_OnErrorAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnErrorAsync' +type Txn_OnErrorAsync_Call struct { + *mock.Call +} + +// OnErrorAsync is a helper method to define mock.On call +// - fn func() +func (_e *Txn_Expecter) OnErrorAsync(fn interface{}) *Txn_OnErrorAsync_Call { + return &Txn_OnErrorAsync_Call{Call: _e.mock.On("OnErrorAsync", fn)} +} 
+ +func (_c *Txn_OnErrorAsync_Call) Run(run func(fn func())) *Txn_OnErrorAsync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func())) + }) + return _c +} + +func (_c *Txn_OnErrorAsync_Call) Return() *Txn_OnErrorAsync_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_OnErrorAsync_Call) RunAndReturn(run func(func())) *Txn_OnErrorAsync_Call { + _c.Call.Return(run) + return _c +} + // OnSuccess provides a mock function with given fields: fn func (_m *Txn) OnSuccess(fn func()) { _m.Called(fn) @@ -366,6 +432,39 @@ func (_c *Txn_OnSuccess_Call) RunAndReturn(run func(func())) *Txn_OnSuccess_Call return _c } +// OnSuccessAsync provides a mock function with given fields: fn +func (_m *Txn) OnSuccessAsync(fn func()) { + _m.Called(fn) +} + +// Txn_OnSuccessAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnSuccessAsync' +type Txn_OnSuccessAsync_Call struct { + *mock.Call +} + +// OnSuccessAsync is a helper method to define mock.On call +// - fn func() +func (_e *Txn_Expecter) OnSuccessAsync(fn interface{}) *Txn_OnSuccessAsync_Call { + return &Txn_OnSuccessAsync_Call{Call: _e.mock.On("OnSuccessAsync", fn)} +} + +func (_c *Txn_OnSuccessAsync_Call) Run(run func(fn func())) *Txn_OnSuccessAsync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func())) + }) + return _c +} + +func (_c *Txn_OnSuccessAsync_Call) Return() *Txn_OnSuccessAsync_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_OnSuccessAsync_Call) RunAndReturn(run func(func())) *Txn_OnSuccessAsync_Call { + _c.Call.Return(run) + return _c +} + // Peerstore provides a mock function with given fields: func (_m *Txn) Peerstore() datastore.DSBatching { ret := _m.Called() diff --git a/datastore/txn.go b/datastore/txn.go index acc7a53193..249903b817 100644 --- a/datastore/txn.go +++ b/datastore/txn.go @@ -43,105 +43,116 @@ type Txn interface { // OnDiscard registers a function to be called when the transaction is discarded. 
OnDiscard(fn func()) + + // OnSuccessAsync registers a function to be called asynchronously when the transaction is committed. + OnSuccessAsync(fn func()) + + // OnErrorAsync registers a function to be called asynchronously when the transaction is rolled back. + OnErrorAsync(fn func()) + + // OnDiscardAsync registers a function to be called asynchronously when the transaction is discarded. + OnDiscardAsync(fn func()) } type txn struct { - t ds.Txn MultiStore - + t ds.Txn id uint64 successFns []func() errorFns []func() discardFns []func() + + successAsyncFns []func() + errorAsyncFns []func() + discardAsyncFns []func() } var _ Txn = (*txn)(nil) -// NewTxnFrom returns a new Txn from the rootstore. -func NewTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uint64, readonly bool) (Txn, error) { +func newTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, readonly bool) (ds.Txn, error) { // check if our datastore natively supports iterable transaction, transactions or batching - if iterableTxnStore, ok := rootstore.(iterable.IterableTxnDatastore); ok { - rootTxn, err := iterableTxnStore.NewIterableTransaction(ctx, readonly) - if err != nil { - return nil, err - } - multistore := MultiStoreFrom(ShimTxnStore{rootTxn}) - return &txn{ - rootTxn, - multistore, - id, - []func(){}, - []func(){}, - []func(){}, - }, nil + switch t := rootstore.(type) { + case iterable.IterableTxnDatastore: + return t.NewIterableTransaction(ctx, readonly) + + default: + return rootstore.NewTransaction(ctx, readonly) } +} - rootTxn, err := rootstore.NewTransaction(ctx, readonly) +// NewTxnFrom returns a new Txn from the rootstore. 
+func NewTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uint64, readonly bool) (Txn, error) { + rootTxn, err := newTxnFrom(ctx, rootstore, readonly) if err != nil { return nil, err } - multistore := MultiStoreFrom(ShimTxnStore{rootTxn}) return &txn{ - rootTxn, - multistore, - id, - []func(){}, - []func(){}, - []func(){}, + t: rootTxn, + MultiStore: multistore, + id: id, }, nil } -// ID returns the unique immutable identifier for this transaction. func (t *txn) ID() uint64 { return t.id } -// Commit finalizes a transaction, attempting to commit it to the Datastore. func (t *txn) Commit(ctx context.Context) error { - if err := t.t.Commit(ctx); err != nil { - runFns(t.errorFns) - return err + var fns []func() + var asyncFns []func() + + err := t.t.Commit(ctx) + if err != nil { + fns = t.errorFns + asyncFns = t.errorAsyncFns + } else { + fns = t.successFns + asyncFns = t.successAsyncFns } - runFns(t.successFns) - return nil + + for _, fn := range asyncFns { + go fn() + } + for _, fn := range fns { + fn() + } + return err } -// Discard throws away changes recorded in a transaction without committing. func (t *txn) Discard(ctx context.Context) { t.t.Discard(ctx) - runFns(t.discardFns) + for _, fn := range t.discardAsyncFns { + go fn() + } + for _, fn := range t.discardFns { + fn() + } } -// OnSuccess registers a function to be called when the transaction is committed. -func (txn *txn) OnSuccess(fn func()) { - if fn == nil { - return - } - txn.successFns = append(txn.successFns, fn) +func (t *txn) OnSuccess(fn func()) { + t.successFns = append(t.successFns, fn) } -// OnError registers a function to be called when the transaction is rolled back. -func (txn *txn) OnError(fn func()) { - if fn == nil { - return - } - txn.errorFns = append(txn.errorFns, fn) +func (t *txn) OnError(fn func()) { + t.errorFns = append(t.errorFns, fn) } -// OnDiscard registers a function to be called when the transaction is discarded. 
-func (txn *txn) OnDiscard(fn func()) { - if fn == nil { - return - } - txn.discardFns = append(txn.discardFns, fn) +func (t *txn) OnDiscard(fn func()) { + t.discardFns = append(t.discardFns, fn) } -func runFns(fns []func()) { - for _, fn := range fns { - fn() - } +func (t *txn) OnSuccessAsync(fn func()) { + t.successAsyncFns = append(t.successAsyncFns, fn) +} + +func (t *txn) OnErrorAsync(fn func()) { + t.errorAsyncFns = append(t.errorAsyncFns, fn) +} + +func (t *txn) OnDiscardAsync(fn func()) { + t.discardAsyncFns = append(t.discardAsyncFns, fn) } // Shim to make ds.Txn support ds.Datastore. diff --git a/datastore/txn_test.go b/datastore/txn_test.go index 95c2cf7ef0..1a8623600f 100644 --- a/datastore/txn_test.go +++ b/datastore/txn_test.go @@ -12,6 +12,7 @@ package datastore import ( "context" + "sync" "testing" ds "github.com/ipfs/go-datastore" @@ -57,8 +58,6 @@ func TestOnSuccess(t *testing.T) { txn, err := NewTxnFrom(ctx, rootstore, 0, false) require.NoError(t, err) - txn.OnSuccess(nil) - text := "Source" txn.OnSuccess(func() { text += " Inc" @@ -69,7 +68,7 @@ func TestOnSuccess(t *testing.T) { require.Equal(t, text, "Source Inc") } -func TestOnError(t *testing.T) { +func TestOnSuccessAsync(t *testing.T) { ctx := context.Background() opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} rootstore, err := badgerds.NewDatastore("", &opts) @@ -78,7 +77,25 @@ func TestOnError(t *testing.T) { txn, err := NewTxnFrom(ctx, rootstore, 0, false) require.NoError(t, err) - txn.OnError(nil) + var wg sync.WaitGroup + txn.OnSuccessAsync(func() { + wg.Done() + }) + + wg.Add(1) + err = txn.Commit(ctx) + require.NoError(t, err) + wg.Wait() +} + +func TestOnError(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + txn, err := NewTxnFrom(ctx, rootstore, 0, false) + require.NoError(t, err) text := 
"Source" txn.OnError(func() { @@ -94,6 +111,68 @@ func TestOnError(t *testing.T) { require.Equal(t, text, "Source Inc") } +func TestOnErrorAsync(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + txn, err := NewTxnFrom(ctx, rootstore, 0, false) + require.NoError(t, err) + + var wg sync.WaitGroup + txn.OnErrorAsync(func() { + wg.Done() + }) + + rootstore.Close() + require.NoError(t, err) + + wg.Add(1) + err = txn.Commit(ctx) + require.ErrorIs(t, err, badgerds.ErrClosed) + wg.Wait() +} + +func TestOnDiscard(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + txn, err := NewTxnFrom(ctx, rootstore, 0, false) + require.NoError(t, err) + + text := "Source" + txn.OnDiscard(func() { + text += " Inc" + }) + txn.Discard(ctx) + require.NoError(t, err) + + require.Equal(t, text, "Source Inc") +} + +func TestOnDiscardAsync(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + txn, err := NewTxnFrom(ctx, rootstore, 0, false) + require.NoError(t, err) + + var wg sync.WaitGroup + txn.OnDiscardAsync(func() { + wg.Done() + }) + + wg.Add(1) + txn.Discard(ctx) + require.NoError(t, err) + wg.Wait() +} + func TestShimTxnStoreSync(t *testing.T) { ctx := context.Background() opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} diff --git a/http/client_tx.go b/http/client_tx.go index f1f2830006..19e5814b51 100644 --- a/http/client_tx.go +++ b/http/client_tx.go @@ -71,6 +71,18 @@ func (c *Transaction) OnDiscard(fn func()) { panic("client side transaction") } +func (c *Transaction) 
OnSuccessAsync(fn func()) { + panic("client side transaction") +} + +func (c *Transaction) OnErrorAsync(fn func()) { + panic("client side transaction") +} + +func (c *Transaction) OnDiscardAsync(fn func()) { + panic("client side transaction") +} + func (c *Transaction) Rootstore() datastore.DSReaderWriter { panic("client side transaction") } diff --git a/tests/bench/query/planner/utils.go b/tests/bench/query/planner/utils.go index 967f141357..b91b0aa2a3 100644 --- a/tests/bench/query/planner/utils.go +++ b/tests/bench/query/planner/utils.go @@ -143,4 +143,7 @@ func (*dummyTxn) Discard(ctx context.Context) {} func (*dummyTxn) OnSuccess(fn func()) {} func (*dummyTxn) OnError(fn func()) {} func (*dummyTxn) OnDiscard(fn func()) {} +func (*dummyTxn) OnSuccessAsync(fn func()) {} +func (*dummyTxn) OnErrorAsync(fn func()) {} +func (*dummyTxn) OnDiscardAsync(fn func()) {} func (*dummyTxn) ID() uint64 { return 0 } diff --git a/tests/clients/cli/wrapper_tx.go b/tests/clients/cli/wrapper_tx.go index 33bfe43bee..5b5b2c3ea7 100644 --- a/tests/clients/cli/wrapper_tx.go +++ b/tests/clients/cli/wrapper_tx.go @@ -55,6 +55,18 @@ func (w *Transaction) OnDiscard(fn func()) { w.tx.OnDiscard(fn) } +func (w *Transaction) OnSuccessAsync(fn func()) { + w.tx.OnSuccessAsync(fn) +} + +func (w *Transaction) OnErrorAsync(fn func()) { + w.tx.OnErrorAsync(fn) +} + +func (w *Transaction) OnDiscardAsync(fn func()) { + w.tx.OnDiscardAsync(fn) +} + func (w *Transaction) Rootstore() datastore.DSReaderWriter { return w.tx.Rootstore() } diff --git a/tests/clients/http/wrapper_tx.go b/tests/clients/http/wrapper_tx.go index fe63a9ded5..d53d967b3b 100644 --- a/tests/clients/http/wrapper_tx.go +++ b/tests/clients/http/wrapper_tx.go @@ -49,6 +49,18 @@ func (w *TxWrapper) OnDiscard(fn func()) { w.server.OnDiscard(fn) } +func (w *TxWrapper) OnSuccessAsync(fn func()) { + w.server.OnSuccessAsync(fn) +} + +func (w *TxWrapper) OnErrorAsync(fn func()) { + w.server.OnErrorAsync(fn) +} + +func (w *TxWrapper) 
OnDiscardAsync(fn func()) { + w.server.OnDiscardAsync(fn) +} + func (w *TxWrapper) Rootstore() datastore.DSReaderWriter { return w.server.Rootstore() } From 45107069beff05907ef88638797f44cb06295bf5 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Wed, 12 Jun 2024 14:22:32 -0700 Subject: [PATCH 44/78] fix: Race condition when testing CLI (#2713) ## Relevant issue(s) Resolves #2712 ## Description This PR fixes a race condition found in #2700 ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test` Specify the platform(s) on which this was tested: - MacOS --- cli/config.go | 49 +++++++++++++++------ cli/config_test.go | 11 +++-- cli/root.go | 92 ++++++++++++++++++--------------------- cli/start.go | 104 +++++++++++++++++++++------------------------ cli/utils.go | 2 +- 5 files changed, 134 insertions(+), 124 deletions(-) diff --git a/cli/config.go b/cli/config.go index 089a1b074f..d559711f8b 100644 --- a/cli/config.go +++ b/cli/config.go @@ -65,6 +65,32 @@ var configFlags = map[string]string{ "no-keyring": "keyring.disabled", } +// configDefaults contains default values for config entries. 
+var configDefaults = map[string]any{ + "api.address": "127.0.0.1:9181", + "api.allowed-origins": []string{}, + "datastore.badger.path": "data", + "datastore.maxtxnretries": 5, + "datastore.store": "badger", + "datastore.badger.valuelogfilesize": 1 << 30, + "net.p2pdisabled": false, + "net.p2paddresses": []string{"/ip4/127.0.0.1/tcp/9171"}, + "net.peers": []string{}, + "net.pubSubEnabled": true, + "net.relay": false, + "keyring.backend": "file", + "keyring.disabled": false, + "keyring.namespace": "defradb", + "keyring.path": "keys", + "log.caller": false, + "log.colordisabled": false, + "log.format": "text", + "log.level": "info", + "log.output": "stderr", + "log.source": false, + "log.stacktrace": false, +} + // defaultConfig returns a new config with default values. func defaultConfig() *viper.Viper { cfg := viper.New() @@ -76,20 +102,18 @@ func defaultConfig() *viper.Viper { cfg.SetConfigName("config") cfg.SetConfigType("yaml") - cfg.SetDefault("datastore.badger.path", "data") - cfg.SetDefault("net.pubSubEnabled", true) - cfg.SetDefault("net.relay", false) - cfg.SetDefault("log.caller", false) - + for key, val := range configDefaults { + cfg.SetDefault(key, val) + } return cfg } // createConfig writes the default config file if one does not exist. -func createConfig(rootdir string) error { +func createConfig(rootdir string, flags *pflag.FlagSet) error { cfg := defaultConfig() cfg.AddConfigPath(rootdir) - if err := bindConfigFlags(cfg); err != nil { + if err := bindConfigFlags(cfg, flags); err != nil { return err } // make sure rootdir exists @@ -107,7 +131,7 @@ func createConfig(rootdir string) error { } // loadConfig returns a new config with values from the config in the given rootdir. 
-func loadConfig(rootdir string) (*viper.Viper, error) { +func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) { cfg := defaultConfig() cfg.AddConfigPath(rootdir) @@ -120,7 +144,7 @@ func loadConfig(rootdir string) (*viper.Viper, error) { return nil, err } // bind cli flags to config keys - if err := bindConfigFlags(cfg); err != nil { + if err := bindConfigFlags(cfg, flags); err != nil { return nil, err } @@ -149,12 +173,9 @@ func loadConfig(rootdir string) (*viper.Viper, error) { } // bindConfigFlags binds the set of cli flags to config values. -func bindConfigFlags(cfg *viper.Viper) error { +func bindConfigFlags(cfg *viper.Viper, flags *pflag.FlagSet) error { var errs []error - rootFlags.VisitAll(func(f *pflag.Flag) { - errs = append(errs, cfg.BindPFlag(configFlags[f.Name], f)) - }) - startFlags.VisitAll(func(f *pflag.Flag) { + flags.VisitAll(func(f *pflag.Flag) { errs = append(errs, cfg.BindPFlag(configFlags[f.Name], f)) }) return errors.Join(errs...) diff --git a/cli/config_test.go b/cli/config_test.go index 8cc5f62bdb..d3f6d954e3 100644 --- a/cli/config_test.go +++ b/cli/config_test.go @@ -14,17 +14,20 @@ import ( "path/filepath" "testing" + "github.com/spf13/pflag" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestCreateConfig(t *testing.T) { rootdir := t.TempDir() - err := createConfig(rootdir) + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + err := createConfig(rootdir, flags) require.NoError(t, err) // ensure no errors when config already exists - err = createConfig(rootdir) + err = createConfig(rootdir, flags) require.NoError(t, err) assert.FileExists(t, filepath.Join(rootdir, "config.yaml")) @@ -32,7 +35,9 @@ func TestCreateConfig(t *testing.T) { func TestLoadConfigNotExist(t *testing.T) { rootdir := t.TempDir() - cfg, err := loadConfig(rootdir) + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + cfg, err := loadConfig(rootdir, flags) require.NoError(t, err) 
assert.Equal(t, 5, cfg.GetInt("datastore.maxtxnretries")) diff --git a/cli/root.go b/cli/root.go index 37488f9549..51bf0e2ed6 100644 --- a/cli/root.go +++ b/cli/root.go @@ -12,98 +12,90 @@ package cli import ( "github.com/spf13/cobra" - "github.com/spf13/pflag" ) -// rootFlags is a set of persistent flags that are bound to config values. -var rootFlags = pflag.NewFlagSet("root", pflag.ContinueOnError) +func MakeRootCommand() *cobra.Command { + var cmd = &cobra.Command{ + SilenceUsage: true, + Use: "defradb", + Short: "DefraDB Edge Database", + Long: `DefraDB is the edge database to power the user-centric future. -func init() { - rootFlags.String( +Start a DefraDB node, interact with a local or remote node, and much more. +`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := setContextRootDir(cmd); err != nil { + return err + } + return setContextConfig(cmd) + }, + } + // set default flag values from config + cfg := defaultConfig() + cmd.PersistentFlags().String( "rootdir", "", "Directory for persistent data (default: $HOME/.defradb)", ) - rootFlags.String( + cmd.PersistentFlags().String( "log-level", - "info", + cfg.GetString(configFlags["log-level"]), "Log level to use. Options are debug, info, error, fatal", ) - rootFlags.String( + cmd.PersistentFlags().String( "log-output", - "stderr", + cfg.GetString(configFlags["log-output"]), "Log output path. Options are stderr or stdout.", ) - rootFlags.String( + cmd.PersistentFlags().String( "log-format", - "text", + cfg.GetString(configFlags["log-format"]), "Log format to use. 
Options are text or json", ) - rootFlags.Bool( + cmd.PersistentFlags().Bool( "log-stacktrace", - false, + cfg.GetBool(configFlags["log-stacktrace"]), "Include stacktrace in error and fatal logs", ) - rootFlags.Bool( + cmd.PersistentFlags().Bool( "log-source", - false, + cfg.GetBool(configFlags["log-source"]), "Include source location in logs", ) - rootFlags.String( + cmd.PersistentFlags().String( "log-overrides", - "", + cfg.GetString(configFlags["log-overrides"]), "Logger config overrides. Format ,=,...;,...", ) - rootFlags.Bool( + cmd.PersistentFlags().Bool( "no-log-color", - false, + cfg.GetBool(configFlags["no-log-color"]), "Disable colored log output", ) - rootFlags.String( + cmd.PersistentFlags().String( "url", - "127.0.0.1:9181", + cfg.GetString(configFlags["url"]), "URL of HTTP endpoint to listen on or connect to", ) - rootFlags.String( + cmd.PersistentFlags().String( "keyring-namespace", - "defradb", + cfg.GetString(configFlags["keyring-namespace"]), "Service name to use when using the system backend", ) - rootFlags.String( + cmd.PersistentFlags().String( "keyring-backend", - "file", + cfg.GetString(configFlags["keyring-backend"]), "Keyring backend to use. Options are file or system", ) - rootFlags.String( + cmd.PersistentFlags().String( "keyring-path", - "keys", + cfg.GetString(configFlags["keyring-path"]), "Path to store encrypted keys when using the file backend", ) - rootFlags.Bool( + cmd.PersistentFlags().Bool( "no-keyring", - false, + cfg.GetBool(configFlags["no-keyring"]), "Disable the keyring and generate ephemeral keys", ) -} - -func MakeRootCommand() *cobra.Command { - var cmd = &cobra.Command{ - SilenceUsage: true, - Use: "defradb", - Short: "DefraDB Edge Database", - Long: `DefraDB is the edge database to power the user-centric future. - -Start a DefraDB node, interact with a local or remote node, and much more. 
-`, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := setContextRootDir(cmd); err != nil { - return err - } - return setContextConfig(cmd) - }, - } - - cmd.PersistentFlags().AddFlagSet(rootFlags) - return cmd } diff --git a/cli/start.go b/cli/start.go index 4ae60b2bb0..ef0067abef 100644 --- a/cli/start.go +++ b/cli/start.go @@ -18,7 +18,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/spf13/cobra" - "github.com/spf13/pflag" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/http" @@ -29,57 +28,6 @@ import ( "github.com/sourcenetwork/defradb/node" ) -// startFlags is a set of persistent flags that are bound to config values. -var startFlags = pflag.NewFlagSet("start", pflag.ContinueOnError) - -func init() { - startFlags.StringArray( - "peers", - []string{}, - "List of peers to connect to", - ) - startFlags.Int( - "max-txn-retries", - 5, - "Specify the maximum number of retries per transaction", - ) - startFlags.String( - "store", - "badger", - "Specify the datastore to use (supported: badger, memory)", - ) - startFlags.Int( - "valuelogfilesize", - 1<<30, - "Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize", - ) - startFlags.StringSlice( - "p2paddr", - []string{"/ip4/127.0.0.1/tcp/9171"}, - "Listen addresses for the p2p network (formatted as a libp2p MultiAddr)", - ) - startFlags.Bool( - "no-p2p", - false, - "Disable the peer-to-peer network synchronization system", - ) - startFlags.StringArray( - "allowed-origins", - []string{}, - "List of origins to allow for CORS requests", - ) - startFlags.String( - "pubkeypath", - "", - "Path to the public key for tls", - ) - startFlags.String( - "privkeypath", - "", - "Path to the private key for tls", - ) -} - func MakeStartCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "start", @@ -91,7 +39,7 @@ func MakeStartCommand() *cobra.Command { return err } rootdir := mustGetContextRootDir(cmd) - if err := createConfig(rootdir); err != nil { + if err := createConfig(rootdir, cmd.Flags()); err != nil { return err } return setContextConfig(cmd) @@ -186,8 +134,52 @@ func MakeStartCommand() *cobra.Command { return nil }, } - - cmd.PersistentFlags().AddFlagSet(startFlags) - + // set default flag values from config + cfg := defaultConfig() + cmd.PersistentFlags().StringArray( + "peers", + cfg.GetStringSlice(configFlags["peers"]), + "List of peers to connect to", + ) + cmd.PersistentFlags().Int( + "max-txn-retries", + cfg.GetInt(configFlags["max-txn-retries"]), + "Specify the maximum number of retries per transaction", + ) + cmd.PersistentFlags().String( + "store", + cfg.GetString(configFlags["store"]), + "Specify the datastore to use (supported: badger, memory)", + ) + cmd.PersistentFlags().Int( + "valuelogfilesize", + cfg.GetInt(configFlags["valuelogfilesize"]), + "Specify the datastore value log file size (in bytes). 
In memory size will be 2*valuelogfilesize", + ) + cmd.PersistentFlags().StringSlice( + "p2paddr", + cfg.GetStringSlice(configFlags["p2paddr"]), + "Listen addresses for the p2p network (formatted as a libp2p MultiAddr)", + ) + cmd.PersistentFlags().Bool( + "no-p2p", + cfg.GetBool(configFlags["no-p2p"]), + "Disable the peer-to-peer network synchronization system", + ) + cmd.PersistentFlags().StringArray( + "allowed-origins", + cfg.GetStringSlice(configFlags["allowed-origins"]), + "List of origins to allow for CORS requests", + ) + cmd.PersistentFlags().String( + "pubkeypath", + cfg.GetString(configFlags["pubkeypath"]), + "Path to the public key for tls", + ) + cmd.PersistentFlags().String( + "privkeypath", + cfg.GetString(configFlags["privkeypath"]), + "Path to the private key for tls", + ) return cmd } diff --git a/cli/utils.go b/cli/utils.go index c82bba0990..afd941bc5c 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -116,7 +116,7 @@ func setContextDB(cmd *cobra.Command) error { // setContextConfig sets teh config for the current command context. func setContextConfig(cmd *cobra.Command) error { rootdir := mustGetContextRootDir(cmd) - cfg, err := loadConfig(rootdir) + cfg, err := loadConfig(rootdir, cmd.Flags()) if err != nil { return err } From 6e9688892a7f8362f0f1ba6d6c37ba21da9e0d75 Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Thu, 13 Jun 2024 13:14:47 -0400 Subject: [PATCH 45/78] chore(i): Update pubsub rpc version (#2715) ## Relevant issue(s) Resolves #2714 ## Description This is a simple update of the pubsub-rpc version. A fix was done on our fork of this package that removes the annoying error log that we would get. 
``` 2024-06-12T20:51:37.954-0400 ERROR psrpc go-libp2p-pubsub-rpc@v0.0.13/rpc.go:350 subcription message handler: decoding response id: invalid cid: cid too short ``` --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a2701648d5..fc838114a6 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 github.com/sourcenetwork/corelog v0.0.7 - github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 + github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.14 github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 github.com/sourcenetwork/sourcehub v0.2.1-0.20240305165631-9b75b1000724 diff --git a/go.sum b/go.sum index 13d7dc3838..23410e3394 100644 --- a/go.sum +++ b/go.sum @@ -1073,8 +1073,8 @@ github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 h1:TpQDD github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276/go.mod h1:lxiZTDBw0vheFMqSwX2OvB6RTDI1+/UtVCSU4rpThFM= github.com/sourcenetwork/corelog v0.0.7 h1:vztssVAUDcsYN5VUOW3PKYhLprHfzoc8UbKewQuD1qw= github.com/sourcenetwork/corelog v0.0.7/go.mod h1:cMabHgs3kARgYTQeQYSOmaGGP8XMU6sZrHd8LFrL3zA= -github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 h1:d/PeGZutd5NcDr6ltAv8ubN5PxsHMp1YUnhHY/QCWB4= -github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13/go.mod h1:jUoQv592uUX1u7QBjAY4C+l24X9ArhPfifOqXpDHz4U= +github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.14 h1:620zKV4rOn7U5j/WsPkk4SFj0z9/pVV4bBx0BpZQgro= +github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.14/go.mod h1:jUoQv592uUX1u7QBjAY4C+l24X9ArhPfifOqXpDHz4U= github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd h1:lmpW39/8wPJ0khWRhOcj7Bj0HYKbSmQ8rXMJw1cMB8U= github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd/go.mod 
h1:rkahXkgRH/3vZErN1Bx+qt1+w+CV5fgaJyKKWgISe4U= github.com/sourcenetwork/immutable v0.3.0 h1:gHPtGvLrTBTK5YpDAhMU+u+S8v1F6iYmc3nbZLryMdc= From 4ee61f7bdefe6434c9c8934139d1b81f331e74d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Jun 2024 15:53:02 -0400 Subject: [PATCH 46/78] bot: Bump braces from 3.0.2 to 3.0.3 in /playground (#2716) Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=braces&package-manager=npm_and_yarn&previous-version=3.0.2&new-version=3.0.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sourcenetwork/defradb/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index e5a7db5d6e..216c6f78c9 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -2789,12 +2789,12 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" @@ -3537,9 +3537,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, "dependencies": { "to-regex-range": "^5.0.1" From 2b701544c4bd0ac15b8ef43e9f08c4ecd46995f8 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Mon, 17 Jun 2024 09:15:37 -0400 Subject: [PATCH 47/78] refactor: Rework definition validation (#2720) ## Relevant issue(s) Resolves #2537 ## Description Reworks definition validation, standardizing the rule signatures, allowing rule reuse across different contexts, and hopefully improving their readability. 
Performance of the rules will have decreased slightly, but on col/schema update that is unimportant, performance of `createCollections` (called when creating via SDL docs) has probably improved slightly due to a reduction in datastore calls. --- internal/db/collection_define.go | 169 ++-- internal/db/definition_validation.go | 829 ++++++++++++------ internal/db/errors.go | 14 +- internal/db/schema.go | 59 +- internal/db/view.go | 36 +- .../updates/copy/name_test.go | 2 +- .../updates/replace/name_test.go | 2 +- tests/integration/schema/get_schema_test.go | 12 +- .../schema/migrations/query/simple_test.go | 42 +- .../migrations/query/with_doc_id_test.go | 4 +- .../migrations/query/with_inverse_test.go | 6 +- .../query/with_p2p_schema_branch_test.go | 2 +- .../schema/migrations/query/with_p2p_test.go | 14 +- .../migrations/query/with_restart_test.go | 4 +- .../migrations/query/with_set_default_test.go | 6 +- .../schema/migrations/query/with_txn_test.go | 4 +- .../migrations/query/with_update_test.go | 4 +- .../schema/migrations/simple_test.go | 4 +- tests/integration/schema/one_one_test.go | 4 +- .../updates/add/field/create_update_test.go | 4 +- .../schema/updates/add/field/simple_test.go | 10 +- .../schema/updates/add/simple_test.go | 2 +- .../schema/updates/copy/field/simple_test.go | 2 +- .../schema/updates/copy/simple_test.go | 2 +- .../schema/updates/remove/simple_test.go | 4 +- .../schema/updates/replace/simple_test.go | 2 +- .../schema/updates/with_schema_branch_test.go | 28 +- .../schema/with_update_set_default_test.go | 2 +- 28 files changed, 807 insertions(+), 466 deletions(-) diff --git a/internal/db/collection_define.go b/internal/db/collection_define.go index 4712911399..a8b9fe9abd 100644 --- a/internal/db/collection_define.go +++ b/internal/db/collection_define.go @@ -25,113 +25,126 @@ import ( "github.com/sourcenetwork/defradb/internal/db/description" ) -func (db *db) createCollection( +func (db *db) createCollections( ctx context.Context, - def 
client.CollectionDefinition, newDefinitions []client.CollectionDefinition, -) (client.Collection, error) { - schema := def.Schema - desc := def.Description - txn := mustGetContextTxn(ctx) - - if desc.Name.HasValue() { - exists, err := description.HasCollectionByName(ctx, txn, desc.Name.Value()) - if err != nil { - return nil, err - } - if exists { - return nil, ErrCollectionAlreadyExists - } - } +) ([]client.CollectionDefinition, error) { + returnDescriptions := make([]client.CollectionDefinition, len(newDefinitions)) existingDefinitions, err := db.getAllActiveDefinitions(ctx) if err != nil { return nil, err } - schemaByName := map[string]client.SchemaDescription{} - for _, existingDefinition := range existingDefinitions { - schemaByName[existingDefinition.Schema.Name] = existingDefinition.Schema - } - for _, newDefinition := range newDefinitions { - schemaByName[newDefinition.Schema.Name] = newDefinition.Schema - } + txn := mustGetContextTxn(ctx) - _, err = validateUpdateSchemaFields(schemaByName, client.SchemaDescription{}, schema) - if err != nil { - return nil, err - } + for i, def := range newDefinitions { + schemaByName := map[string]client.SchemaDescription{} + for _, existingDefinition := range existingDefinitions { + schemaByName[existingDefinition.Schema.Name] = existingDefinition.Schema + } + for _, newDefinition := range newDefinitions { + schemaByName[newDefinition.Schema.Name] = newDefinition.Schema + } - definitionsByName := map[string]client.CollectionDefinition{} - for _, existingDefinition := range existingDefinitions { - definitionsByName[existingDefinition.GetName()] = existingDefinition - } - for _, newDefinition := range newDefinitions { - definitionsByName[newDefinition.GetName()] = newDefinition - } - err = db.validateNewCollection(def, definitionsByName) - if err != nil { - return nil, err + schema, err := description.CreateSchemaVersion(ctx, txn, def.Schema) + if err != nil { + return nil, err + } + 
newDefinitions[i].Description.SchemaVersionID = schema.VersionID + newDefinitions[i].Schema = schema } - colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) - if err != nil { - return nil, err - } - colID, err := colSeq.next(ctx) - if err != nil { - return nil, err - } + for i, def := range newDefinitions { + if len(def.Description.Fields) == 0 { + // This is a schema-only definition, we should not create a collection for it + continue + } - fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(uint32(colID))) - if err != nil { - return nil, err - } + colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) + if err != nil { + return nil, err + } + colID, err := colSeq.next(ctx) + if err != nil { + return nil, err + } - desc.ID = uint32(colID) - desc.RootID = desc.ID + fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(uint32(colID))) + if err != nil { + return nil, err + } - schema, err = description.CreateSchemaVersion(ctx, txn, schema) - if err != nil { - return nil, err - } - desc.SchemaVersionID = schema.VersionID - for _, localField := range desc.Fields { - var fieldID uint64 - if localField.Name == request.DocIDFieldName { - // There is no hard technical requirement for this, we just think it looks nicer - // if the doc id is at the zero index. It makes it look a little nicer in commit - // queries too. - fieldID = 0 - } else { - fieldID, err = fieldSeq.next(ctx) - if err != nil { - return nil, err + newDefinitions[i].Description.ID = uint32(colID) + newDefinitions[i].Description.RootID = newDefinitions[i].Description.ID + + for _, localField := range def.Description.Fields { + var fieldID uint64 + if localField.Name == request.DocIDFieldName { + // There is no hard technical requirement for this, we just think it looks nicer + // if the doc id is at the zero index. It makes it look a little nicer in commit + // queries too. 
+ fieldID = 0 + } else { + fieldID, err = fieldSeq.next(ctx) + if err != nil { + return nil, err + } } - } - for i := range desc.Fields { - if desc.Fields[i].Name == localField.Name { - desc.Fields[i].ID = client.FieldID(fieldID) - break + for j := range def.Description.Fields { + if def.Description.Fields[j].Name == localField.Name { + newDefinitions[i].Description.Fields[j].ID = client.FieldID(fieldID) + break + } } } } - desc, err = description.SaveCollection(ctx, txn, desc) + err = db.validateNewCollection( + ctx, + append( + append( + []client.CollectionDefinition{}, + newDefinitions..., + ), + existingDefinitions..., + ), + existingDefinitions, + ) if err != nil { return nil, err } - col := db.newCollection(desc, schema) + for _, def := range newDefinitions { + if len(def.Description.Fields) == 0 { + // This is a schema-only definition, we should not create a collection for it + returnDescriptions = append(returnDescriptions, def) + continue + } + + desc, err := description.SaveCollection(ctx, txn, def.Description) + if err != nil { + return nil, err + } + + col := db.newCollection(desc, def.Schema) + + for _, index := range desc.Indexes { + if _, err := col.createIndex(ctx, index); err != nil { + return nil, err + } + } - for _, index := range desc.Indexes { - if _, err := col.createIndex(ctx, index); err != nil { + result, err := db.getCollectionByID(ctx, desc.ID) + if err != nil { return nil, err } + + returnDescriptions = append(returnDescriptions, result.Definition()) } - return db.getCollectionByID(ctx, desc.ID) + return returnDescriptions, nil } func (db *db) patchCollection( @@ -171,7 +184,7 @@ func (db *db) patchCollection( return err } - err = db.validateCollectionChanges(existingColsByID, newColsByID) + err = db.validateCollectionChanges(ctx, cols, newColsByID) if err != nil { return err } diff --git a/internal/db/definition_validation.go b/internal/db/definition_validation.go index 988ebeb15c..08e6e603a7 100644 --- 
a/internal/db/definition_validation.go +++ b/internal/db/definition_validation.go @@ -14,18 +14,88 @@ import ( "context" "reflect" - "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" ) -var patchCollectionValidators = []func( - map[uint32]client.CollectionDescription, - map[uint32]client.CollectionDescription, -) error{ - validateCollectionNameUnique, - validateSingleVersionActive, +// definitionState holds collection and schema descriptions in easily accessible +// sets. +// +// It is read only and will not and should not be mutated. +type definitionState struct { + collections []client.CollectionDescription + collectionsByID map[uint32]client.CollectionDescription + + schemaByID map[string]client.SchemaDescription + schemaByName map[string]client.SchemaDescription + + definitionsByName map[string]client.CollectionDefinition +} + +// newDefinitionState creates a new definitionState object given the provided +// descriptions. 
+func newDefinitionState( + collections []client.CollectionDescription, + schemasByID map[string]client.SchemaDescription, +) *definitionState { + collectionsByID := map[uint32]client.CollectionDescription{} + definitionsByName := map[string]client.CollectionDefinition{} + schemaByName := map[string]client.SchemaDescription{} + schemaVersionsAdded := map[string]struct{}{} + + for _, col := range collections { + if len(col.Fields) == 0 { + continue + } + + schema := schemasByID[col.SchemaVersionID] + definition := client.CollectionDefinition{ + Description: col, + Schema: schema, + } + + definitionsByName[definition.GetName()] = definition + schemaVersionsAdded[schema.VersionID] = struct{}{} + collectionsByID[col.ID] = col + } + + for _, schema := range schemasByID { + schemaByName[schema.Name] = schema + + if _, ok := schemaVersionsAdded[schema.VersionID]; ok { + continue + } + + definitionsByName[schema.Name] = client.CollectionDefinition{ + Schema: schema, + } + } + + return &definitionState{ + collections: collections, + collectionsByID: collectionsByID, + schemaByID: schemasByID, + schemaByName: schemaByName, + definitionsByName: definitionsByName, + } +} + +// definitionValidator aliases the signature that all schema and collection +// validation functions should follow. +type definitionValidator = func( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error + +// createOnlyValidators are executed on the creation of new descriptions only +// they will not be executed for updates to existing records. +var createOnlyValidators = []definitionValidator{} + +// createOnlyValidators are executed on the update of existing descriptions only +// they will not be executed for new records. 
+var updateOnlyValidators = []definitionValidator{ validateSourcesNotRedefined, validateIndexesNotModified, validateFieldsNotModified, @@ -36,23 +106,53 @@ var patchCollectionValidators = []func( validateRootIDNotMutated, validateSchemaVersionIDNotMutated, validateCollectionNotRemoved, + validateSingleVersionActive, + validateSchemaNotAdded, + validateSchemaFieldNotDeleted, + validateFieldNotMutated, + validateFieldNotMoved, } -var newCollectionValidators = []func( - client.CollectionDefinition, - map[string]client.CollectionDefinition, -) error{ - validateSecondaryFieldsPairUp, +// globalValidators are run on create and update of records. +var globalValidators = []definitionValidator{ + validateCollectionNameUnique, validateRelationPointsToValidKind, + validateSecondaryFieldsPairUp, validateSingleSidePrimary, + validateCollectionDefinitionPolicyDesc, + validateSchemaNameNotEmpty, + validateRelationalFieldIDType, + validateSecondaryNotOnSchema, + validateTypeSupported, + validateTypeAndKindCompatible, + validateFieldNotDuplicated, } +var updateValidators = append( + append([]definitionValidator{}, updateOnlyValidators...), + globalValidators..., +) + +var createValidators = append( + append([]definitionValidator{}, createOnlyValidators...), + globalValidators..., +) + func (db *db) validateCollectionChanges( - oldColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + oldCols []client.CollectionDescription, newColsByID map[uint32]client.CollectionDescription, ) error { - for _, validators := range patchCollectionValidators { - err := validators(oldColsByID, newColsByID) + newCols := make([]client.CollectionDescription, 0, len(newColsByID)) + for _, col := range newColsByID { + newCols = append(newCols, col) + } + + newState := newDefinitionState(newCols, map[string]client.SchemaDescription{}) + oldState := newDefinitionState(oldCols, map[string]client.SchemaDescription{}) + + for _, validator := range updateValidators { + err := validator(ctx, 
db, newState, oldState) if err != nil { return err } @@ -62,11 +162,38 @@ func (db *db) validateCollectionChanges( } func (db *db) validateNewCollection( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, + ctx context.Context, + newDefinitions []client.CollectionDefinition, + oldDefinitions []client.CollectionDefinition, ) error { - for _, validators := range newCollectionValidators { - err := validators(def, defsByName) + newCollections := []client.CollectionDescription{} + newSchemasByID := map[string]client.SchemaDescription{} + + for _, def := range newDefinitions { + if len(def.Description.Fields) != 0 { + newCollections = append(newCollections, def.Description) + } + + newSchemasByID[def.Schema.VersionID] = def.Schema + } + + newState := newDefinitionState(newCollections, newSchemasByID) + + oldCollections := []client.CollectionDescription{} + oldSchemasByID := map[string]client.SchemaDescription{} + + for _, def := range oldDefinitions { + if len(def.Description.Fields) != 0 { + oldCollections = append(oldCollections, def.Description) + } + + oldSchemasByID[def.Schema.VersionID] = def.Schema + } + + oldState := newDefinitionState(oldCollections, oldSchemasByID) + + for _, validator := range createValidators { + err := validator(ctx, db, newState, oldState) if err != nil { return err } @@ -75,75 +202,134 @@ func (db *db) validateNewCollection( return nil } -func validateRelationPointsToValidKind( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, +func (db *db) validateSchemaUpdate( + ctx context.Context, + newSchemaByName map[string]client.SchemaDescription, + oldSchemaByName map[string]client.SchemaDescription, ) error { - for _, field := range def.Description.Fields { - if !field.Kind.HasValue() { - continue - } + newSchemaByID := make(map[string]client.SchemaDescription, len(newSchemaByName)) + oldSchemaByID := make(map[string]client.SchemaDescription, len(oldSchemaByName)) + for 
_, schema := range newSchemaByName { + newSchemaByID[schema.VersionID] = schema + } + for _, schema := range oldSchemaByName { + oldSchemaByID[schema.VersionID] = schema + } - if !field.Kind.Value().IsObject() { - continue - } + newState := newDefinitionState([]client.CollectionDescription{}, newSchemaByID) + oldState := newDefinitionState([]client.CollectionDescription{}, oldSchemaByID) - underlying := field.Kind.Value().Underlying() - _, ok := defsByName[underlying] - if !ok { - return NewErrFieldKindNotFound(field.Name, underlying) + for _, validator := range updateValidators { + err := validator(ctx, db, newState, oldState) + if err != nil { + return err } } return nil } -func validateSecondaryFieldsPairUp( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, +func validateRelationPointsToValidKind( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, field := range def.Description.Fields { - if !field.Kind.HasValue() { - continue - } + for _, newCollection := range newState.collections { + for _, field := range newCollection.Fields { + if !field.Kind.HasValue() { + continue + } - if !field.Kind.Value().IsObject() { - continue - } + if !field.Kind.Value().IsObject() { + continue + } - if !field.RelationName.HasValue() { - continue + underlying := field.Kind.Value().Underlying() + _, ok := newState.definitionsByName[underlying] + if !ok { + return NewErrFieldKindNotFound(field.Name, underlying) + } } + } - _, hasSchemaField := def.Schema.GetFieldByName(field.Name) - if hasSchemaField { - continue + for _, schema := range newState.schemaByName { + for _, field := range schema.Fields { + if !field.Kind.IsObject() { + continue + } + + underlying := field.Kind.Underlying() + _, ok := newState.definitionsByName[underlying] + if !ok { + return NewErrFieldKindNotFound(field.Name, underlying) + } } + } + + return nil +} - underlying := field.Kind.Value().Underlying() - otherDef, 
ok := defsByName[underlying] +func validateSecondaryFieldsPairUp( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, newCollection := range newState.collections { + schema, ok := newState.schemaByID[newCollection.SchemaVersionID] if !ok { continue } - if len(otherDef.Description.Fields) == 0 { - // Views/embedded objects do not require both sides of the relation to be defined. - continue + definition := client.CollectionDefinition{ + Description: newCollection, + Schema: schema, } - otherField, ok := otherDef.Description.GetFieldByRelation( - field.RelationName.Value(), - def.GetName(), - field.Name, - ) - if !ok { - return NewErrRelationMissingField(underlying, field.RelationName.Value()) - } + for _, field := range newCollection.Fields { + if !field.Kind.HasValue() { + continue + } - _, ok = otherDef.Schema.GetFieldByName(otherField.Name) - if !ok { - // This secondary is paired with another secondary, which is invalid - return NewErrRelationMissingField(underlying, field.RelationName.Value()) + if !field.Kind.Value().IsObject() { + continue + } + + if !field.RelationName.HasValue() { + continue + } + + _, hasSchemaField := schema.GetFieldByName(field.Name) + if hasSchemaField { + continue + } + + underlying := field.Kind.Value().Underlying() + otherDef, ok := newState.definitionsByName[underlying] + if !ok { + continue + } + + if len(otherDef.Description.Fields) == 0 { + // Views/embedded objects do not require both sides of the relation to be defined. 
+ continue + } + + otherField, ok := otherDef.Description.GetFieldByRelation( + field.RelationName.Value(), + definition.GetName(), + field.Name, + ) + if !ok { + return NewErrRelationMissingField(underlying, field.RelationName.Value()) + } + + _, ok = otherDef.Schema.GetFieldByName(otherField.Name) + if !ok { + // This secondary is paired with another secondary, which is invalid + return NewErrRelationMissingField(underlying, field.RelationName.Value()) + } } } @@ -151,48 +337,57 @@ func validateSecondaryFieldsPairUp( } func validateSingleSidePrimary( - def client.CollectionDefinition, - defsByName map[string]client.CollectionDefinition, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, field := range def.Description.Fields { - if !field.Kind.HasValue() { + for _, newCollection := range newState.collections { + schema, ok := newState.schemaByID[newCollection.SchemaVersionID] + if !ok { continue } - if !field.Kind.Value().IsObject() { - continue + definition := client.CollectionDefinition{ + Description: newCollection, + Schema: schema, } - if !field.RelationName.HasValue() { - continue - } + for _, field := range definition.GetFields() { + if !field.Kind.IsObject() { + continue + } - _, hasSchemaField := def.Schema.GetFieldByName(field.Name) - if !hasSchemaField { - // This is a secondary field and thus passes this rule - continue - } + if field.RelationName == "" { + continue + } - underlying := field.Kind.Value().Underlying() - otherDef, ok := defsByName[underlying] - if !ok { - continue - } + if !field.IsPrimaryRelation { + // This is a secondary field and thus passes this rule + continue + } - otherField, ok := otherDef.Description.GetFieldByRelation( - field.RelationName.Value(), - def.GetName(), - field.Name, - ) - if !ok { - // This must be a one-sided relation, in which case it passes this rule - continue - } + underlying := field.Kind.Underlying() + otherDef, ok := 
newState.definitionsByName[underlying] + if !ok { + continue + } + + otherField, ok := otherDef.Description.GetFieldByRelation( + field.RelationName, + definition.GetName(), + field.Name, + ) + if !ok { + // This must be a one-sided relation, in which case it passes this rule + continue + } - _, ok = otherDef.Schema.GetFieldByName(otherField.Name) - if ok { - // This primary is paired with another primary, which is invalid - return ErrMultipleRelationPrimaries + _, ok = otherDef.Schema.GetFieldByName(otherField.Name) + if ok { + // This primary is paired with another primary, which is invalid + return ErrMultipleRelationPrimaries + } } } @@ -200,11 +395,13 @@ func validateSingleSidePrimary( } func validateCollectionNameUnique( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { names := map[string]struct{}{} - for _, col := range newColsByID { + for _, col := range newState.collections { if !col.Name.HasValue() { continue } @@ -219,11 +416,13 @@ func validateCollectionNameUnique( } func validateSingleVersionActive( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { rootsWithActiveCol := map[uint32]struct{}{} - for _, col := range newColsByID { + for _, col := range newState.collections { if !col.Name.HasValue() { continue } @@ -243,11 +442,13 @@ func validateSingleVersionActive( // Currently new sources cannot be added, existing cannot be removed, and CollectionSources // cannot be redirected to other collections. 
func validateSourcesNotRedefined( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] + for _, newCol := range newState.collections { + oldCol, ok := oldState.collectionsByID[newCol.ID] if !ok { continue } @@ -281,11 +482,13 @@ func validateSourcesNotRedefined( } func validateIndexesNotModified( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] + for _, newCol := range newState.collections { + oldCol, ok := oldState.collectionsByID[newCol.ID] if !ok { continue } @@ -300,11 +503,13 @@ func validateIndexesNotModified( } func validateFieldsNotModified( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] + for _, newCol := range newState.collections { + oldCol, ok := oldState.collectionsByID[newCol.ID] if !ok { continue } @@ -319,11 +524,13 @@ func validateFieldsNotModified( } func validatePolicyNotModified( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] + for _, newCol := range newState.collections { + oldCol, ok := oldState.collectionsByID[newCol.ID] if !ok { continue } @@ -338,10 +545,12 @@ func validatePolicyNotModified( } func 
validateIDNotZero( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { + for _, newCol := range newState.collections { if newCol.ID == 0 { return ErrCollectionIDCannotBeZero } @@ -351,11 +560,13 @@ func validateIDNotZero( } func validateIDUnique( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { colIds := map[uint32]struct{}{} - for _, newCol := range newColsByID { + for _, newCol := range newState.collections { if _, ok := colIds[newCol.ID]; ok { return NewErrCollectionIDAlreadyExists(newCol.ID) } @@ -366,11 +577,13 @@ func validateIDUnique( } func validateIDExists( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { - if _, ok := oldColsByID[newCol.ID]; !ok { + for _, newCol := range newState.collections { + if _, ok := oldState.collectionsByID[newCol.ID]; !ok { return NewErrAddCollectionIDWithPatch(newCol.ID) } } @@ -379,11 +592,13 @@ func validateIDExists( } func validateRootIDNotMutated( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] + for _, newCol := range newState.collections { + oldCol, ok := oldState.collectionsByID[newCol.ID] if !ok { continue } @@ -393,15 +608,28 @@ func validateRootIDNotMutated( } } + for _, newSchema := range newState.schemaByName { + oldSchema := 
oldState.schemaByName[newSchema.Name] + if newSchema.Root != oldSchema.Root { + return NewErrSchemaRootDoesntMatch( + newSchema.Name, + oldSchema.Root, + newSchema.Root, + ) + } + } + return nil } func validateSchemaVersionIDNotMutated( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { - for _, newCol := range newColsByID { - oldCol, ok := oldColsByID[newCol.ID] + for _, newCol := range newState.collections { + oldCol, ok := oldState.collectionsByID[newCol.ID] if !ok { continue } @@ -411,16 +639,26 @@ func validateSchemaVersionIDNotMutated( } } + for _, newSchema := range newState.schemaByName { + oldSchema := oldState.schemaByName[newSchema.Name] + if newSchema.VersionID != "" && newSchema.VersionID != oldSchema.VersionID { + // If users specify this it will be overwritten, an error is preferred to quietly ignoring it. + return ErrCannotSetVersionID + } + } + return nil } func validateCollectionNotRemoved( - oldColsByID map[uint32]client.CollectionDescription, - newColsByID map[uint32]client.CollectionDescription, + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, ) error { oldLoop: - for _, oldCol := range oldColsByID { - for _, newCol := range newColsByID { + for _, oldCol := range oldState.collections { + for _, newCol := range newState.collectionsByID { // It is not enough to just match by the map index, in case the index does not pair // up with the ID (this can happen if a user moves the collection within the map) if newCol.ID == oldCol.ID { @@ -438,148 +676,245 @@ oldLoop: // // Ensures that the information within the policy definition makes sense, // this function might also make relevant remote calls using the acp system. 
-func (db *db) validateCollectionDefinitionPolicyDesc( +func validateCollectionDefinitionPolicyDesc( ctx context.Context, - policyDesc immutable.Option[client.PolicyDescription], + db *db, + newState *definitionState, + oldState *definitionState, ) error { - if !policyDesc.HasValue() { - // No policy validation needed, whether acp exists or not doesn't matter. - return nil - } + for _, newCol := range newState.collections { + if !newCol.Policy.HasValue() { + // No policy validation needed, whether acp exists or not doesn't matter. + continue + } + + // If there is a policy specified, but the database does not have + // acp enabled/available return an error, database must have an acp available + // to enable access control (inorder to adhere to the policy specified). + if !db.acp.HasValue() { + return ErrCanNotHavePolicyWithoutACP + } + + // If we have the policy specified on the collection, and acp is available/enabled, + // then using the acp system we need to ensure the policy id specified + // actually exists as a policy, and the resource name exists on that policy + // and that the resource is a valid DPI. + err := db.acp.Value().ValidateResourceExistsOnValidDPI( + ctx, + newCol.Policy.Value().ID, + newCol.Policy.Value().ResourceName, + ) - // If there is a policy specified, but the database does not have - // acp enabled/available return an error, database must have an acp available - // to enable access control (inorder to adhere to the policy specified). - if !db.acp.HasValue() { - return ErrCanNotHavePolicyWithoutACP + if err != nil { + return err + } } - // If we have the policy specified on the collection, and acp is available/enabled, - // then using the acp system we need to ensure the policy id specified - // actually exists as a policy, and the resource name exists on that policy - // and that the resource is a valid DPI. 
- return db.acp.Value().ValidateResourceExistsOnValidDPI( - ctx, - policyDesc.Value().ID, - policyDesc.Value().ResourceName, - ) + return nil } -// validateUpdateSchema validates that the given schema description is a valid update. -// -// Will return true if the given description differs from the current persisted state of the -// schema. Will return an error if it fails validation. -func (db *db) validateUpdateSchema( - existingDescriptionsByName map[string]client.SchemaDescription, - proposedDescriptionsByName map[string]client.SchemaDescription, - proposedDesc client.SchemaDescription, -) (bool, error) { - if proposedDesc.Name == "" { - return false, ErrSchemaNameEmpty - } - - existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name] - if !collectionExists { - return false, NewErrAddCollectionWithPatch(proposedDesc.Name) - } - - if proposedDesc.Root != existingDesc.Root { - return false, NewErrSchemaRootDoesntMatch( - proposedDesc.Name, - existingDesc.Root, - proposedDesc.Root, - ) - } +func validateSchemaFieldNotDeleted( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, newSchema := range newState.schemaByName { + oldSchema := oldState.schemaByName[newSchema.Name] + + for _, oldField := range oldSchema.Fields { + stillExists := false + for _, newField := range newSchema.Fields { + if newField.Name == oldField.Name { + stillExists = true + break + } + } - if proposedDesc.Name != existingDesc.Name { - // There is actually little reason to not support this atm besides controlling the surface area - // of the new feature. Changing this should not break anything, but it should be tested first. 
- return false, NewErrCannotModifySchemaName(existingDesc.Name, proposedDesc.Name) + if !stillExists { + return NewErrCannotDeleteField(oldField.Name) + } + } } - if proposedDesc.VersionID != "" && proposedDesc.VersionID != existingDesc.VersionID { - // If users specify this it will be overwritten, an error is preferred to quietly ignoring it. - return false, ErrCannotSetVersionID - } + return nil +} - hasChangedFields, err := validateUpdateSchemaFields(proposedDescriptionsByName, existingDesc, proposedDesc) - if err != nil { - return hasChangedFields, err +func validateTypeAndKindCompatible( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, newSchema := range newState.schemaByName { + for _, newField := range newSchema.Fields { + if !newField.Typ.IsCompatibleWith(newField.Kind) { + return client.NewErrCRDTKindMismatch(newField.Typ.String(), newField.Kind.String()) + } + } } - return hasChangedFields, err + return nil } -func validateUpdateSchemaFields( - descriptionsByName map[string]client.SchemaDescription, - existingDesc client.SchemaDescription, - proposedDesc client.SchemaDescription, -) (bool, error) { - hasChanged := false - existingFieldsByName := map[string]client.SchemaFieldDescription{} - existingFieldIndexesByName := map[string]int{} - for i, field := range existingDesc.Fields { - existingFieldIndexesByName[field.Name] = i - existingFieldsByName[field.Name] = field +func validateTypeSupported( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, newSchema := range newState.schemaByName { + for _, newField := range newSchema.Fields { + if !newField.Typ.IsSupportedFieldCType() { + return client.NewErrInvalidCRDTType(newField.Name, newField.Typ.String()) + } + } } - newFieldNames := map[string]struct{}{} - for proposedIndex, proposedField := range proposedDesc.Fields { - existingField, fieldAlreadyExists := 
existingFieldsByName[proposedField.Name] - - // If the field is new, then the collection has changed - hasChanged = hasChanged || !fieldAlreadyExists + return nil +} - if !fieldAlreadyExists && proposedField.Kind.IsObject() { - _, relatedDescFound := descriptionsByName[proposedField.Kind.Underlying()] +func validateFieldNotMoved( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, oldSchema := range oldState.schemaByName { + oldFieldIndexesByName := map[string]int{} + for i, field := range oldSchema.Fields { + oldFieldIndexesByName[field.Name] = i + } - if !relatedDescFound { - return false, NewErrFieldKindNotFound(proposedField.Name, proposedField.Kind.Underlying()) - } + newSchema := newState.schemaByName[oldSchema.Name] - if proposedField.Kind.IsObject() && !proposedField.Kind.IsArray() { - idFieldName := proposedField.Name + request.RelatedObjectID - idField, idFieldFound := proposedDesc.GetFieldByName(idFieldName) - if idFieldFound { - if idField.Kind != client.FieldKind_DocID { - return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocID, idField.Kind) - } - } + for newIndex, newField := range newSchema.Fields { + if existingIndex, exists := oldFieldIndexesByName[newField.Name]; exists && newIndex != existingIndex { + return NewErrCannotMoveField(newField.Name, newIndex, existingIndex) } } + } - if proposedField.Kind.IsObjectArray() { - return false, NewErrSecondaryFieldOnSchema(proposedField.Name) + return nil +} + +func validateFieldNotMutated( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, oldSchema := range oldState.schemaByName { + oldFieldsByName := map[string]client.SchemaFieldDescription{} + for _, field := range oldSchema.Fields { + oldFieldsByName[field.Name] = field } - if _, isDuplicate := newFieldNames[proposedField.Name]; isDuplicate { - return false, NewErrDuplicateField(proposedField.Name) + 
newSchema := newState.schemaByName[oldSchema.Name] + + for _, newField := range newSchema.Fields { + oldField, exists := oldFieldsByName[newField.Name] + if exists && oldField != newField { + return NewErrCannotMutateField(newField.Name) + } } + } - if fieldAlreadyExists && proposedField != existingField { - return false, NewErrCannotMutateField(proposedField.Name) + return nil +} + +func validateFieldNotDuplicated( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, schema := range newState.schemaByName { + fieldNames := map[string]struct{}{} + + for _, field := range schema.Fields { + if _, isDuplicate := fieldNames[field.Name]; isDuplicate { + return NewErrDuplicateField(field.Name) + } + fieldNames[field.Name] = struct{}{} } + } + + return nil +} - if existingIndex := existingFieldIndexesByName[proposedField.Name]; fieldAlreadyExists && - proposedIndex != existingIndex { - return false, NewErrCannotMoveField(proposedField.Name, proposedIndex, existingIndex) +func validateSecondaryNotOnSchema( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, newSchema := range newState.schemaByName { + for _, newField := range newSchema.Fields { + if newField.Kind.IsObjectArray() { + return NewErrSecondaryFieldOnSchema(newField.Name) + } } + } + + return nil +} + +func validateRelationalFieldIDType( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, schema := range newState.schemaByName { + fieldsByName := map[string]client.SchemaFieldDescription{} - if !proposedField.Typ.IsSupportedFieldCType() { - return false, client.NewErrInvalidCRDTType(proposedField.Name, proposedField.Typ.String()) + for _, field := range schema.Fields { + fieldsByName[field.Name] = field } - if !proposedField.Typ.IsCompatibleWith(proposedField.Kind) { - return false, client.NewErrCRDTKindMismatch(proposedField.Typ.String(), 
proposedField.Kind.String()) + for _, field := range schema.Fields { + if field.Kind.IsObject() && !field.Kind.IsArray() { + idFieldName := field.Name + request.RelatedObjectID + idField, idFieldFound := fieldsByName[idFieldName] + if idFieldFound { + if idField.Kind != client.FieldKind_DocID { + return NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocID, idField.Kind) + } + } + } } + } + + return nil +} - newFieldNames[proposedField.Name] = struct{}{} +func validateSchemaNotAdded( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, newSchema := range newState.schemaByName { + if _, exists := oldState.schemaByName[newSchema.Name]; !exists { + return NewErrAddSchemaWithPatch(newSchema.Name) + } } - for _, field := range existingDesc.Fields { - if _, stillExists := newFieldNames[field.Name]; !stillExists { - return false, NewErrCannotDeleteField(field.Name) + return nil +} + +func validateSchemaNameNotEmpty( + ctx context.Context, + db *db, + newState *definitionState, + oldState *definitionState, +) error { + for _, schema := range newState.schemaByName { + if schema.Name == "" { + return ErrSchemaNameEmpty } } - return hasChanged, nil + + return nil } diff --git a/internal/db/errors.go b/internal/db/errors.go index 8d3c770bd8..7a81824efe 100644 --- a/internal/db/errors.go +++ b/internal/db/errors.go @@ -24,9 +24,9 @@ const ( errAddingP2PCollection string = "cannot add collection ID" errRemovingP2PCollection string = "cannot remove collection ID" errAddCollectionWithPatch string = "adding collections via patch is not supported" + errAddSchemaWithPatch string = "adding schema via patch is not supported" errCollectionIDDoesntMatch string = "CollectionID does not match existing" errSchemaRootDoesntMatch string = "SchemaRoot does not match existing" - errCannotModifySchemaName string = "modifying the schema name is not supported" errCannotSetVersionID string = "setting the VersionID is not 
supported" errRelationalFieldInvalidRelationType string = "invalid RelationType" errRelationalFieldMissingIDField string = "missing id field for relation object field" @@ -224,9 +224,9 @@ func NewErrRemovingP2PCollection(inner error) error { return errors.Wrap(errRemovingP2PCollection, inner) } -func NewErrAddCollectionWithPatch(name string) error { +func NewErrAddSchemaWithPatch(name string) error { return errors.New( - errAddCollectionWithPatch, + errAddSchemaWithPatch, errors.NewKV("Name", name), ) } @@ -256,14 +256,6 @@ func NewErrSchemaRootDoesntMatch(name, existingRoot, proposedRoot string) error ) } -func NewErrCannotModifySchemaName(existingName, proposedName string) error { - return errors.New( - errCannotModifySchemaName, - errors.NewKV("ExistingName", existingName), - errors.NewKV("ProposedName", proposedName), - ) -} - func NewErrRelationalFieldMissingIDField(name string, expectedName string) error { return errors.New( errRelationalFieldMissingIDField, diff --git a/internal/db/schema.go b/internal/db/schema.go index 8c0ba074dc..d2aeb8bcb9 100644 --- a/internal/db/schema.go +++ b/internal/db/schema.go @@ -44,19 +44,14 @@ func (db *db) addSchema( return nil, err } - returnDescriptions := make([]client.CollectionDescription, len(newDefinitions)) - for i, definition := range newDefinitions { - // Only accept the schema if policy description is valid, otherwise reject the schema. 
- err := db.validateCollectionDefinitionPolicyDesc(ctx, definition.Description.Policy) - if err != nil { - return nil, err - } + returnDefinitions, err := db.createCollections(ctx, newDefinitions) + if err != nil { + return nil, err + } - col, err := db.createCollection(ctx, definition, newDefinitions) - if err != nil { - return nil, err - } - returnDescriptions[i] = col.Description() + returnDescriptions := make([]client.CollectionDescription, len(returnDefinitions)) + for i, def := range returnDefinitions { + returnDescriptions[i] = def.Description } err = db.loadSchema(ctx) @@ -341,19 +336,18 @@ func (db *db) updateSchema( migration immutable.Option[model.Lens], setAsActiveVersion bool, ) error { - hasChanged, err := db.validateUpdateSchema( - existingSchemaByName, - proposedDescriptionsByName, - schema, - ) - if err != nil { - return err - } + previousSchema := existingSchemaByName[schema.Name] - if !hasChanged { + areEqual := areSchemasEqual(schema, previousSchema) + if areEqual { return nil } + err := db.validateSchemaUpdate(ctx, proposedDescriptionsByName, existingSchemaByName) + if err != nil { + return err + } + for _, field := range schema.Fields { if field.Kind.IsObject() && !field.Kind.IsArray() { idFieldName := field.Name + "_id" @@ -366,8 +360,13 @@ func (db *db) updateSchema( } } + previousFieldNames := make(map[string]struct{}, len(previousSchema.Fields)) + for _, field := range previousSchema.Fields { + previousFieldNames[field.Name] = struct{}{} + } + for i, field := range schema.Fields { - if field.Typ == client.NONE_CRDT { + if _, existed := previousFieldNames[field.Name]; !existed && field.Typ == client.NONE_CRDT { // If no CRDT Type has been provided, default to LWW_REGISTER. 
field.Typ = client.LWW_REGISTER schema.Fields[i] = field @@ -524,3 +523,19 @@ func (db *db) updateSchema( return nil } + +func areSchemasEqual(this client.SchemaDescription, that client.SchemaDescription) bool { + if len(this.Fields) != len(that.Fields) { + return false + } + + for i, thisField := range this.Fields { + if thisField != that.Fields[i] { + return false + } + } + + return this.Name == that.Name && + this.Root == that.Root && + this.VersionID == that.VersionID +} diff --git a/internal/db/view.go b/internal/db/view.go index a663da7add..2664dd4a57 100644 --- a/internal/db/view.go +++ b/internal/db/view.go @@ -20,7 +20,6 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/internal/db/description" ) func (db *db) addView( @@ -29,8 +28,6 @@ func (db *db) addView( sdl string, transform immutable.Option[model.Lens], ) ([]client.CollectionDefinition, error) { - txn := mustGetContextTxn(ctx) - // Wrap the given query as part of the GQL query object - this simplifies the syntax for users // and ensures that we can't be given mutations. In the future this line should disappear along // with the all calls to the parser appart from `ParseSDL` when we implement the DQL stuff. 
@@ -68,30 +65,17 @@ func (db *db) addView( newDefinitions[i].Description.Sources = append(newDefinitions[i].Description.Sources, &source) } - returnDescriptions := make([]client.CollectionDefinition, len(newDefinitions)) - for i, definition := range newDefinitions { - if !definition.Description.Name.HasValue() { - schema, err := description.CreateSchemaVersion(ctx, txn, definition.Schema) - if err != nil { - return nil, err - } - returnDescriptions[i] = client.CollectionDefinition{ - // `Collection` is left as default for embedded types - Schema: schema, - } - } else { - col, err := db.createCollection(ctx, definition, newDefinitions) - if err != nil { - return nil, err - } - returnDescriptions[i] = col.Definition() + returnDescriptions, err := db.createCollections(ctx, newDefinitions) + if err != nil { + return nil, err + } - for _, source := range col.Description().QuerySources() { - if source.Transform.HasValue() { - err = db.LensRegistry().SetMigration(ctx, col.ID(), source.Transform.Value()) - if err != nil { - return nil, err - } + for _, definition := range returnDescriptions { + for _, source := range definition.Description.QuerySources() { + if source.Transform.HasValue() { + err = db.LensRegistry().SetMigration(ctx, definition.Description.ID, source.Transform.Value()) + if err != nil { + return nil, err } } } diff --git a/tests/integration/collection_description/updates/copy/name_test.go b/tests/integration/collection_description/updates/copy/name_test.go index b915d111ac..f5cbd3a83b 100644 --- a/tests/integration/collection_description/updates/copy/name_test.go +++ b/tests/integration/collection_description/updates/copy/name_test.go @@ -40,7 +40,7 @@ func TestColDescrUpdateCopyName_Errors(t *testing.T) { { "op": "copy", "from": "/1/Name", "path": "/2/Name" } ] `, - ExpectedError: "collection already exists. Name: Users", + ExpectedError: "multiple versions of same collection cannot be active. 
Name: Users, Root: 1", }, }, } diff --git a/tests/integration/collection_description/updates/replace/name_test.go b/tests/integration/collection_description/updates/replace/name_test.go index 98f1ba8c98..55e8160969 100644 --- a/tests/integration/collection_description/updates/replace/name_test.go +++ b/tests/integration/collection_description/updates/replace/name_test.go @@ -99,7 +99,7 @@ func TestColDescrUpdateReplaceName_GivenInactiveCollectionWithSameName_Errors(t { "op": "replace", "path": "/2/Name", "value": "Users" } ] `, - ExpectedError: "collection already exists. Name: Users", + ExpectedError: "multiple versions of same collection cannot be active. Name: Users, Root: 1", }, }, } diff --git a/tests/integration/schema/get_schema_test.go b/tests/integration/schema/get_schema_test.go index a89f4a2eb9..7f04c99c9e 100644 --- a/tests/integration/schema/get_schema_test.go +++ b/tests/integration/schema/get_schema_test.go @@ -72,7 +72,7 @@ func TestGetSchema_GivenNoSchemaGivenUnknownName(t *testing.T) { func TestGetSchema_ReturnsAllSchema(t *testing.T) { usersSchemaVersion1ID := "bafkreia2jn5ecrhtvy4fravk6pm3wqiny46m7mqymvjkgat7xiqupgqoai" - usersSchemaVersion2ID := "bafkreibbsqjeladin2keszmja5kektzgi4eowb6m3oimxssiqge7mmvhva" + usersSchemaVersion2ID := "bafkreialnju2rez4t3quvpobf3463eai3lo64vdrdhdmunz7yy7sv3f5ce" booksSchemaVersion1ID := "bafkreibiu34zrehpq346pwp5z24qkderm7ibhnpcqalhkivhnf5e2afqoy" test := testUtils.TestCase{ @@ -116,7 +116,7 @@ func TestGetSchema_ReturnsAllSchema(t *testing.T) { { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", @@ -146,7 +146,7 @@ func TestGetSchema_ReturnsAllSchema(t *testing.T) { func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { usersSchemaVersion1ID := "bafkreia2jn5ecrhtvy4fravk6pm3wqiny46m7mqymvjkgat7xiqupgqoai" - usersSchemaVersion2ID := "bafkreibbsqjeladin2keszmja5kektzgi4eowb6m3oimxssiqge7mmvhva" + usersSchemaVersion2ID := 
"bafkreialnju2rez4t3quvpobf3463eai3lo64vdrdhdmunz7yy7sv3f5ce" test := testUtils.TestCase{ Actions: []any{ @@ -190,7 +190,7 @@ func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", @@ -209,7 +209,7 @@ func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { func TestGetSchema_ReturnsSchemaForGivenName(t *testing.T) { usersSchemaVersion1ID := "bafkreia2jn5ecrhtvy4fravk6pm3wqiny46m7mqymvjkgat7xiqupgqoai" - usersSchemaVersion2ID := "bafkreibbsqjeladin2keszmja5kektzgi4eowb6m3oimxssiqge7mmvhva" + usersSchemaVersion2ID := "bafkreialnju2rez4t3quvpobf3463eai3lo64vdrdhdmunz7yy7sv3f5ce" test := testUtils.TestCase{ Actions: []any{ @@ -253,7 +253,7 @@ func TestGetSchema_ReturnsSchemaForGivenName(t *testing.T) { { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", diff --git a/tests/integration/schema/migrations/query/simple_test.go b/tests/integration/schema/migrations/query/simple_test.go index a588e70e87..4e0ca20f2b 100644 --- a/tests/integration/schema/migrations/query/simple_test.go +++ b/tests/integration/schema/migrations/query/simple_test.go @@ -46,7 +46,7 @@ func TestSchemaMigrationQuery(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -116,7 +116,7 @@ func TestSchemaMigrationQueryMultipleDocs(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: 
"bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -179,7 +179,7 @@ func TestSchemaMigrationQueryWithMigrationRegisteredBeforeSchemaPatch(t *testing testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -255,7 +255,7 @@ func TestSchemaMigrationQueryMigratesToIntermediaryVersion(t *testing.T) { // there should be no migration from version 2 to version 3. LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -325,8 +325,8 @@ func TestSchemaMigrationQueryMigratesFromIntermediaryVersion(t *testing.T) { // Register a migration from schema version 2 to schema version 3 **only** - // there should be no migration from version 1 to version 2. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", - DestinationSchemaVersionID: "bafkreib65lld2tdyvlilbumlcccftqwvflpgutugghf5afrnlhdg7dgyv4", + SourceSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", + DestinationSchemaVersionID: "bafkreicpdtq27uclgcyeqivvyjvojtk57a573y3upfhi3lvteytktyhlva", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -395,7 +395,7 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -411,8 +411,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", - DestinationSchemaVersionID: "bafkreib65lld2tdyvlilbumlcccftqwvflpgutugghf5afrnlhdg7dgyv4", + SourceSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", + DestinationSchemaVersionID: "bafkreicpdtq27uclgcyeqivvyjvojtk57a573y3upfhi3lvteytktyhlva", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -467,7 +467,7 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatches(t *test testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -483,8 +483,8 @@ func 
TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatches(t *test }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", - DestinationSchemaVersionID: "bafkreib65lld2tdyvlilbumlcccftqwvflpgutugghf5afrnlhdg7dgyv4", + SourceSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", + DestinationSchemaVersionID: "bafkreicpdtq27uclgcyeqivvyjvojtk57a573y3upfhi3lvteytktyhlva", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -553,8 +553,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatchesWrongOrd testUtils.ConfigureMigration{ // Declare the migration from v2=>v3 before declaring the migration from v1=>v2 LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", - DestinationSchemaVersionID: "bafkreib65lld2tdyvlilbumlcccftqwvflpgutugghf5afrnlhdg7dgyv4", + SourceSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", + DestinationSchemaVersionID: "bafkreicpdtq27uclgcyeqivvyjvojtk57a573y3upfhi3lvteytktyhlva", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -571,7 +571,7 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatchesWrongOrd testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -713,7 +713,7 @@ func TestSchemaMigrationQueryMigrationMutatesExistingScalarField(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: 
"bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -774,7 +774,7 @@ func TestSchemaMigrationQueryMigrationMutatesExistingInlineArrayField(t *testing testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreicn6ltdovb6y7g3ecoptqkvx2y5y5yntrb5uydmg3jiakskqva2ta", - DestinationSchemaVersionID: "bafkreifv4vhz3dw7upc5u3omsqi6klz3h3e54ogfskp72gtut62fuxqrcu", + DestinationSchemaVersionID: "bafkreigb473jarbms7de62ykdu5necvxukmb6zbzolp4szdjcwzjvomuiq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -837,7 +837,7 @@ func TestSchemaMigrationQueryMigrationRemovesExistingField(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihhd6bqrjhl5zidwztgxzeseveplv3cj3fwtn3unjkdx7j2vr2vrq", - DestinationSchemaVersionID: "bafkreiegvk3fkcjxoqqpp7npxqjdjwijiwthvynzmsvtzajpjevgu2krku", + DestinationSchemaVersionID: "bafkreibbnm7nrtnvwo7hmjjxacx7nxlqkp6bfr24vtlbv5vhwttlhrbr4q", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -898,7 +898,7 @@ func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequeste testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihhd6bqrjhl5zidwztgxzeseveplv3cj3fwtn3unjkdx7j2vr2vrq", - DestinationSchemaVersionID: "bafkreiegvk3fkcjxoqqpp7npxqjdjwijiwthvynzmsvtzajpjevgu2krku", + DestinationSchemaVersionID: "bafkreibbnm7nrtnvwo7hmjjxacx7nxlqkp6bfr24vtlbv5vhwttlhrbr4q", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -972,7 +972,7 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequeste testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihhd6bqrjhl5zidwztgxzeseveplv3cj3fwtn3unjkdx7j2vr2vrq", - DestinationSchemaVersionID: "bafkreidgnuvanzqur3pkp4mmrd77ojwvov2rlczraaks4435e6wsgxpwoq", + 
DestinationSchemaVersionID: "bafkreifhm3admsxmv3xsbxehfkmtfnxqaq5wchrx47e7zc6vaxr352b3om", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -1034,7 +1034,7 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRe testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihhd6bqrjhl5zidwztgxzeseveplv3cj3fwtn3unjkdx7j2vr2vrq", - DestinationSchemaVersionID: "bafkreidgnuvanzqur3pkp4mmrd77ojwvov2rlczraaks4435e6wsgxpwoq", + DestinationSchemaVersionID: "bafkreifhm3admsxmv3xsbxehfkmtfnxqaq5wchrx47e7zc6vaxr352b3om", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_doc_id_test.go b/tests/integration/schema/migrations/query/with_doc_id_test.go index a006441c4f..2bd34a6fd4 100644 --- a/tests/integration/schema/migrations/query/with_doc_id_test.go +++ b/tests/integration/schema/migrations/query/with_doc_id_test.go @@ -53,7 +53,7 @@ func TestSchemaMigrationQueryByDocID(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -159,7 +159,7 @@ func TestSchemaMigrationQueryMultipleQueriesByDocID(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_inverse_test.go b/tests/integration/schema/migrations/query/with_inverse_test.go 
index f436c332c0..11c83c5fd4 100644 --- a/tests/integration/schema/migrations/query/with_inverse_test.go +++ b/tests/integration/schema/migrations/query/with_inverse_test.go @@ -50,7 +50,7 @@ func TestSchemaMigrationQueryInversesAcrossMultipleVersions(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreicdkt3m6mgwuoix7qyijvwxwtj3dlre4a4c6mdnqbucbndwuxjsvi", - DestinationSchemaVersionID: "bafkreibpaw4dxy6bvmuoyegm7bwxyi24nubozmukemwiour4v62kz5ffuu", + DestinationSchemaVersionID: "bafkreigijxrkfpadmnkpagokjdy6zpwtryad32m6nkgsqrd452kjlfp46e", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -66,8 +66,8 @@ func TestSchemaMigrationQueryInversesAcrossMultipleVersions(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreibpaw4dxy6bvmuoyegm7bwxyi24nubozmukemwiour4v62kz5ffuu", - DestinationSchemaVersionID: "bafkreickm4zodm2muw5qcctmssht63g57u7kxujqyoax4zb5c42zs4pdh4", + SourceSchemaVersionID: "bafkreigijxrkfpadmnkpagokjdy6zpwtryad32m6nkgsqrd452kjlfp46e", + DestinationSchemaVersionID: "bafkreibtmdbc3nbdt74xdwvfrez53fxwyz6nh4b6ppwsrxiqpj5zpwgole", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_p2p_schema_branch_test.go b/tests/integration/schema/migrations/query/with_p2p_schema_branch_test.go index b5e7bdde03..d7dc9f10dd 100644 --- a/tests/integration/schema/migrations/query/with_p2p_schema_branch_test.go +++ b/tests/integration/schema/migrations/query/with_p2p_schema_branch_test.go @@ -47,7 +47,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocOnOtherSchemaBranch(t *testing. // Register the migration on both nodes. 
LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreibpai5hfnalhtn5mgamzkgml4gwftow7pklmjcn6i4sqey6a5u5ce", - DestinationSchemaVersionID: "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm", + DestinationSchemaVersionID: "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_p2p_test.go b/tests/integration/schema/migrations/query/with_p2p_test.go index f8b0197d5d..39adf5a5a8 100644 --- a/tests/integration/schema/migrations/query/with_p2p_test.go +++ b/tests/integration/schema/migrations/query/with_p2p_test.go @@ -47,7 +47,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtOlderSchemaVersion(t *testing // Register the migration on both nodes. LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreibpai5hfnalhtn5mgamzkgml4gwftow7pklmjcn6i4sqey6a5u5ce", - DestinationSchemaVersionID: "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm", + DestinationSchemaVersionID: "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -146,7 +146,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchOlderSchemaVersion(t *tes // Register the migration on both nodes. LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreibpai5hfnalhtn5mgamzkgml4gwftow7pklmjcn6i4sqey6a5u5ce", - DestinationSchemaVersionID: "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm", + DestinationSchemaVersionID: "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -163,8 +163,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchOlderSchemaVersion(t *tes testUtils.ConfigureMigration{ // Register the migration on both nodes. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm", - DestinationSchemaVersionID: "bafkreidiohu3klvu4f2fdqcywtpqild4v7spsn7ivsjtg6sea6ome2oc4i", + SourceSchemaVersionID: "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba", + DestinationSchemaVersionID: "bafkreiglqiiz6j7d5dokcle6juoz26uixxggc5zawqkgwcivmenvhob5jy", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -254,7 +254,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtNewerSchemaVersion(t *testing // Register the migration on both nodes. LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreibpai5hfnalhtn5mgamzkgml4gwftow7pklmjcn6i4sqey6a5u5ce", - DestinationSchemaVersionID: "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm", + DestinationSchemaVersionID: "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -355,8 +355,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch // Register a migration from version 2 to version 3 on both nodes. // There is no migration from version 1 to 2, thus node 1 has no knowledge of schema version 2. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", - DestinationSchemaVersionID: "bafkreib65lld2tdyvlilbumlcccftqwvflpgutugghf5afrnlhdg7dgyv4", + SourceSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", + DestinationSchemaVersionID: "bafkreicpdtq27uclgcyeqivvyjvojtk57a573y3upfhi3lvteytktyhlva", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_restart_test.go b/tests/integration/schema/migrations/query/with_restart_test.go index f44264312c..4f2c0f4ec7 100644 --- a/tests/integration/schema/migrations/query/with_restart_test.go +++ b/tests/integration/schema/migrations/query/with_restart_test.go @@ -46,7 +46,7 @@ func TestSchemaMigrationQueryWithRestart(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -100,7 +100,7 @@ func TestSchemaMigrationQueryWithRestartAndMigrationBeforeSchemaPatch(t *testing testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_set_default_test.go b/tests/integration/schema/migrations/query/with_set_default_test.go index 17c147338c..170a861d89 100644 --- a/tests/integration/schema/migrations/query/with_set_default_test.go +++ 
b/tests/integration/schema/migrations/query/with_set_default_test.go @@ -22,7 +22,7 @@ import ( ) func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t *testing.T) { - schemaVersionID2 := "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm" + schemaVersionID2 := "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba" test := testUtils.TestCase{ Description: "Test schema migration", @@ -84,7 +84,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t * func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t *testing.T) { schemaVersionID1 := "bafkreibpai5hfnalhtn5mgamzkgml4gwftow7pklmjcn6i4sqey6a5u5ce" - schemaVersionID2 := "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm" + schemaVersionID2 := "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba" test := testUtils.TestCase{ Description: "Test schema migration", @@ -159,7 +159,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt_ClearsMigrations(t *testing.T) { schemaVersionID1 := "bafkreibpai5hfnalhtn5mgamzkgml4gwftow7pklmjcn6i4sqey6a5u5ce" - schemaVersionID2 := "bafkreidrbhf54zckhmchzw2ngbobfqtkt7sm6ihbliu2wtxesehz5g4xwm" + schemaVersionID2 := "bafkreif7z5sj2ehtmjenverki7c2hqfjgvbajqdlch6yk4kkbx7qvm2yba" test := testUtils.TestCase{ Description: "Test schema migration", diff --git a/tests/integration/schema/migrations/query/with_txn_test.go b/tests/integration/schema/migrations/query/with_txn_test.go index 880f9e01ed..79d2d9e825 100644 --- a/tests/integration/schema/migrations/query/with_txn_test.go +++ b/tests/integration/schema/migrations/query/with_txn_test.go @@ -48,7 +48,7 @@ func TestSchemaMigrationQueryWithTxn(t *testing.T) { TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - 
DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -110,7 +110,7 @@ func TestSchemaMigrationQueryWithTxnAndCommit(t *testing.T) { TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_update_test.go b/tests/integration/schema/migrations/query/with_update_test.go index 93a2586e25..bbeabcd062 100644 --- a/tests/integration/schema/migrations/query/with_update_test.go +++ b/tests/integration/schema/migrations/query/with_update_test.go @@ -46,7 +46,7 @@ func TestSchemaMigrationQueryWithUpdateRequest(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -130,7 +130,7 @@ func TestSchemaMigrationQueryWithMigrationRegisteredAfterUpdate(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git 
a/tests/integration/schema/migrations/simple_test.go b/tests/integration/schema/migrations/simple_test.go index a7826f5366..e36c9ec836 100644 --- a/tests/integration/schema/migrations/simple_test.go +++ b/tests/integration/schema/migrations/simple_test.go @@ -107,7 +107,7 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe", - DestinationSchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + DestinationSchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -158,7 +158,7 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, { ID: 4, - SchemaVersionID: "bafkreib5jaawobqqiu6frzacerlj55pxxxuql3igqj4ldmg2pgilke4bty", + SchemaVersionID: "bafkreiahhaeagyfsxaxmv3d665qvnbtyn3ts6jshhghy5bijwztbe7efpq", Sources: []any{ &client.CollectionSource{ SourceCollectionID: 3, diff --git a/tests/integration/schema/one_one_test.go b/tests/integration/schema/one_one_test.go index b5bc75bb48..8bc1e5a1fe 100644 --- a/tests/integration/schema/one_one_test.go +++ b/tests/integration/schema/one_one_test.go @@ -30,7 +30,9 @@ func TestSchemaOneOne_NoPrimary_Errors(t *testing.T) { owner: User } `, - ExpectedError: "relation missing field. 
Object: Dog, RelationName: dog_user", + // This error is dependent upon the order in which definitions are validated, so + // we only assert that the error is the correct type, and do not check the key-values + ExpectedError: "relation missing field", }, }, } diff --git a/tests/integration/schema/updates/add/field/create_update_test.go b/tests/integration/schema/updates/add/field/create_update_test.go index cd3a0b1267..53b892e0ae 100644 --- a/tests/integration/schema/updates/add/field/create_update_test.go +++ b/tests/integration/schema/updates/add/field/create_update_test.go @@ -18,7 +18,7 @@ import ( func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoin(t *testing.T) { initialSchemaVersionID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - updatedSchemaVersionID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" + updatedSchemaVersionID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, version join", @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuery(t *testing.T) { initialSchemaVersionID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - updatedSchemaVersionID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" + updatedSchemaVersionID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, commits query", diff --git a/tests/integration/schema/updates/add/field/simple_test.go b/tests/integration/schema/updates/add/field/simple_test.go index 80aaec32d6..d315791dfa 100644 --- a/tests/integration/schema/updates/add/field/simple_test.go +++ b/tests/integration/schema/updates/add/field/simple_test.go @@ -21,7 +21,7 @@ 
import ( func TestSchemaUpdatesAddFieldSimple(t *testing.T) { schemaVersion1ID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - schemaVersion2ID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" + schemaVersion2ID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" test := testUtils.TestCase{ Description: "Test schema update, add field", @@ -60,7 +60,7 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", @@ -116,7 +116,7 @@ func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_Errors(t *testing.T) { func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_VersionIsQueryable(t *testing.T) { schemaVersion1ID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - schemaVersion2ID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" + schemaVersion2ID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" test := testUtils.TestCase{ Description: "Test schema update, add field", @@ -149,7 +149,7 @@ func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_VersionIsQueryable(t *testi { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", @@ -362,7 +362,7 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateOfExistingField(t *testing.T) { { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": 11} } ] `, - ExpectedError: "duplicate field. Name: name", + ExpectedError: "mutating an existing field is not supported. 
ProposedName: name", }, }, } diff --git a/tests/integration/schema/updates/add/simple_test.go b/tests/integration/schema/updates/add/simple_test.go index 88d36680b0..65d04a7af6 100644 --- a/tests/integration/schema/updates/add/simple_test.go +++ b/tests/integration/schema/updates/add/simple_test.go @@ -33,7 +33,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingSchema(t *testing.T) { { "op": "add", "path": "/-", "value": {"Name": "books"} } ] `, - ExpectedError: "adding collections via patch is not supported. Name: books", + ExpectedError: "adding schema via patch is not supported. Name: books", }, testUtils.Request{ Request: `query { diff --git a/tests/integration/schema/updates/copy/field/simple_test.go b/tests/integration/schema/updates/copy/field/simple_test.go index a2c631a515..5baf640ab8 100644 --- a/tests/integration/schema/updates/copy/field/simple_test.go +++ b/tests/integration/schema/updates/copy/field/simple_test.go @@ -34,7 +34,7 @@ func TestSchemaUpdatesCopyFieldErrors(t *testing.T) { { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" } ] `, - ExpectedError: "duplicate field. Name: email", + ExpectedError: "moving fields is not currently supported. Name: email", }, testUtils.Request{ Request: `query { diff --git a/tests/integration/schema/updates/copy/simple_test.go b/tests/integration/schema/updates/copy/simple_test.go index cdda8abaf8..0f5a691149 100644 --- a/tests/integration/schema/updates/copy/simple_test.go +++ b/tests/integration/schema/updates/copy/simple_test.go @@ -38,7 +38,7 @@ func TestSchemaUpdatesCopyCollectionWithRemoveIDAndReplaceName(t *testing.T) { { "op": "replace", "path": "/Book/Name", "value": "Book" } ] `, - ExpectedError: "adding collections via patch is not supported. Name: Book", + ExpectedError: "adding schema via patch is not supported. 
Name: Book", }, }, } diff --git a/tests/integration/schema/updates/remove/simple_test.go b/tests/integration/schema/updates/remove/simple_test.go index e9e4f139ae..d0343484e5 100644 --- a/tests/integration/schema/updates/remove/simple_test.go +++ b/tests/integration/schema/updates/remove/simple_test.go @@ -34,7 +34,7 @@ func TestSchemaUpdatesRemoveCollectionNameErrors(t *testing.T) { { "op": "remove", "path": "/Users/Name" } ] `, - ExpectedError: "schema name can't be empty", + ExpectedError: "SchemaRoot does not match existing. Name: ", }, }, } @@ -118,7 +118,7 @@ func TestSchemaUpdatesRemoveSchemaNameErrors(t *testing.T) { { "op": "remove", "path": "/Users/Name" } ] `, - ExpectedError: "schema name can't be empty", + ExpectedError: "SchemaRoot does not match existing. Name: ", }, }, } diff --git a/tests/integration/schema/updates/replace/simple_test.go b/tests/integration/schema/updates/replace/simple_test.go index 722ff36f9b..7e403f7d03 100644 --- a/tests/integration/schema/updates/replace/simple_test.go +++ b/tests/integration/schema/updates/replace/simple_test.go @@ -44,7 +44,7 @@ func TestSchemaUpdatesReplaceCollectionErrors(t *testing.T) { // WARNING: An error is still expected if/when we allow the adding of collections, as this also // implies that the "Users" collection is to be deleted. Only once we support the adding *and* // removal of collections should this not error. - ExpectedError: "adding collections via patch is not supported. Name: Book", + ExpectedError: "adding schema via patch is not supported. 
Name: Book", }, }, } diff --git a/tests/integration/schema/updates/with_schema_branch_test.go b/tests/integration/schema/updates/with_schema_branch_test.go index d8f7d1afc2..58759f3edd 100644 --- a/tests/integration/schema/updates/with_schema_branch_test.go +++ b/tests/integration/schema/updates/with_schema_branch_test.go @@ -21,8 +21,8 @@ import ( func TestSchemaUpdates_WithBranchingSchema(t *testing.T) { schemaVersion1ID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - schemaVersion2ID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" - schemaVersion3ID := "bafkreifswbi23wxvq2zpqnoldolsxk2fhtj5t6rs3pidil3j6tybc62q3m" + schemaVersion2ID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" + schemaVersion3ID := "bafkreifc46y7pk2xfwc3nc442r7iqf6cjixxerxrrnrsouky544gmz4zve" test := testUtils.TestCase{ Description: "Test schema update, with branching schema", @@ -74,7 +74,7 @@ func TestSchemaUpdates_WithBranchingSchema(t *testing.T) { { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", @@ -112,7 +112,7 @@ func TestSchemaUpdates_WithBranchingSchema(t *testing.T) { { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", @@ -170,9 +170,9 @@ func TestSchemaUpdates_WithBranchingSchema(t *testing.T) { func TestSchemaUpdates_WithPatchOnBranchedSchema(t *testing.T) { schemaVersion1ID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - schemaVersion2ID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" - schemaVersion3ID := "bafkreifswbi23wxvq2zpqnoldolsxk2fhtj5t6rs3pidil3j6tybc62q3m" - schemaVersion4ID := "bafkreid4ulxeclzgpzhznge7zdin6docxvklugvr6gt4jxfyanz5i2r2hu" + schemaVersion2ID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" + schemaVersion3ID := "bafkreifc46y7pk2xfwc3nc442r7iqf6cjixxerxrrnrsouky544gmz4zve" + schemaVersion4ID := 
"bafkreic2heai3vgufxcxs6bfvil2oyz27w3bzkwoqehjevlnkewq3ffp4e" test := testUtils.TestCase{ Description: "Test schema update, with patch on branching schema", @@ -234,7 +234,7 @@ func TestSchemaUpdates_WithPatchOnBranchedSchema(t *testing.T) { { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", @@ -308,8 +308,8 @@ func TestSchemaUpdates_WithPatchOnBranchedSchema(t *testing.T) { func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranch(t *testing.T) { schemaVersion1ID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - schemaVersion2ID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" - schemaVersion3ID := "bafkreifswbi23wxvq2zpqnoldolsxk2fhtj5t6rs3pidil3j6tybc62q3m" + schemaVersion2ID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" + schemaVersion3ID := "bafkreifc46y7pk2xfwc3nc442r7iqf6cjixxerxrrnrsouky544gmz4zve" test := testUtils.TestCase{ Description: "Test schema update, with branching schema toggling between branches", @@ -404,9 +404,9 @@ func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranch(t *tes func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranchThenPatch(t *testing.T) { schemaVersion1ID := "bafkreia3o3cetvcnnxyu5spucimoos77ifungfmacxdkva4zah2is3aooe" - schemaVersion2ID := "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4" - schemaVersion3ID := "bafkreifswbi23wxvq2zpqnoldolsxk2fhtj5t6rs3pidil3j6tybc62q3m" - schemaVersion4ID := "bafkreidjuyxhakc5yx7fucunoxijnfjvgqohf4sjoryzf27mqxidh37kne" + schemaVersion2ID := "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe" + schemaVersion3ID := "bafkreifc46y7pk2xfwc3nc442r7iqf6cjixxerxrrnrsouky544gmz4zve" + schemaVersion4ID := "bafkreifdkkauc4b4rkazmzijiu2nxlikqatxa5zbmjc4sn3wrtlcqqcrt4" test := testUtils.TestCase{ Description: "Test schema update, with branching schema toggling between branches then patch", @@ -472,7 +472,7 @@ 
func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranchThenPat { Name: "_docID", Kind: client.FieldKind_DocID, - Typ: client.LWW_REGISTER, + Typ: client.NONE_CRDT, }, { Name: "name", diff --git a/tests/integration/schema/with_update_set_default_test.go b/tests/integration/schema/with_update_set_default_test.go index f46e0540e3..22f05a4d73 100644 --- a/tests/integration/schema/with_update_set_default_test.go +++ b/tests/integration/schema/with_update_set_default_test.go @@ -129,7 +129,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToNew_AllowsQueryingOfNewField(t * SetAsDefaultVersion: immutable.Some(false), }, testUtils.SetActiveSchemaVersion{ - SchemaVersionID: "bafkreibz4g6rkxanzn6ro74ezmbwoe5hvcguwvi34judrk2kfuqqtk5ak4", + SchemaVersionID: "bafkreidt4i22v4bzga3aezlcxsrfbvuhzcbqo5bnfe2x2dgkpz3eds2afe", }, testUtils.Request{ Request: `query { From 66264414b7377a670b78208cf24a95124cd065d3 Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Mon, 17 Jun 2024 12:45:08 -0400 Subject: [PATCH 48/78] fix: Resolve incorrect merge conflict (#2723) ## Relevant issue(s) Resolves #2673 ## Description This PR fixes the incorrect merge conflict in our memory store. 
--- datastore/badger/v4/datastore_test.go | 7 ++----- datastore/memory/txn.go | 21 ++++++++++++++++----- datastore/memory/txn_test.go | 15 ++++++++++----- datastore/txn_test.go | 17 ++++------------- 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/datastore/badger/v4/datastore_test.go b/datastore/badger/v4/datastore_test.go index 69a24981df..c72ff988db 100644 --- a/datastore/badger/v4/datastore_test.go +++ b/datastore/badger/v4/datastore_test.go @@ -1130,7 +1130,7 @@ func TestTxnWithConflict(t *testing.T) { require.ErrorIs(t, err, ErrTxnConflict) } -func TestTxnWithConflictAfterDelete(t *testing.T) { +func TestTxnWithNoConflictAfterDelete(t *testing.T) { ctx := context.Background() s := newLoadedDatastore(ctx, t) defer func() { @@ -1144,9 +1144,6 @@ func TestTxnWithConflictAfterDelete(t *testing.T) { tx2, err := s.NewTransaction(ctx, false) require.NoError(t, err) - _, err = tx.GetSize(ctx, testKey2) - require.NoError(t, err) - err = tx.Put(ctx, testKey2, testValue3) require.NoError(t, err) @@ -1157,7 +1154,7 @@ func TestTxnWithConflictAfterDelete(t *testing.T) { require.NoError(t, err) err = tx.Commit(ctx) - require.ErrorIs(t, err, ErrTxnConflict) + require.NoError(t, err) } func TestTxnWithNoConflictAfterGet(t *testing.T) { diff --git a/datastore/memory/txn.go b/datastore/memory/txn.go index 7430077e46..f1086332b6 100644 --- a/datastore/memory/txn.go +++ b/datastore/memory/txn.go @@ -80,6 +80,11 @@ func (t *basicTxn) get(ctx context.Context, key ds.Key) dsItem { if result.key == "" { result = t.ds.get(ctx, key, t.getDSVersion()) result.isGet = true + if result.key == "" { + // If the datastore doesn't have the item, we still need to track it + // to check for merge conflicts. 
+ result.key = key.String() + } t.ops.Set(result) } return result @@ -97,7 +102,7 @@ func (t *basicTxn) Get(ctx context.Context, key ds.Key) ([]byte, error) { return nil, ErrTxnDiscarded } result := t.get(ctx, key) - if result.key == "" || result.isDeleted { + if result.version == 0 || result.isDeleted { return nil, ds.ErrNotFound } return result.val, nil @@ -115,7 +120,7 @@ func (t *basicTxn) GetSize(ctx context.Context, key ds.Key) (size int, err error return 0, ErrTxnDiscarded } result := t.get(ctx, key) - if result.key == "" || result.isDeleted { + if result.version == 0 || result.isDeleted { return 0, ds.ErrNotFound } return len(result.val), nil @@ -133,7 +138,7 @@ func (t *basicTxn) Has(ctx context.Context, key ds.Key) (exists bool, err error) return false, ErrTxnDiscarded } result := t.get(ctx, key) - if result.key == "" || result.isDeleted { + if result.version == 0 || result.isDeleted { return false, nil } return true, nil @@ -270,8 +275,14 @@ func (t *basicTxn) checkForConflicts(ctx context.Context) error { iter := t.ops.Iter() defer iter.Release() for iter.Next() { - expectedItem := t.ds.get(ctx, ds.NewKey(iter.Item().key), t.getDSVersion()) - latestItem := t.ds.get(ctx, ds.NewKey(iter.Item().key), t.ds.getVersion()) + item := iter.Item() + if !item.isGet { + // Conflict should only occur if an item has been updated + // after we've read it within the transaction. 
+ continue + } + expectedItem := t.ds.get(ctx, ds.NewKey(item.key), t.getDSVersion()) + latestItem := t.ds.get(ctx, ds.NewKey(item.key), t.ds.getVersion()) if latestItem.version != expectedItem.version { return ErrTxnConflict } diff --git a/datastore/memory/txn_test.go b/datastore/memory/txn_test.go index 0dae48f2dc..5a0d1a2d8c 100644 --- a/datastore/memory/txn_test.go +++ b/datastore/memory/txn_test.go @@ -707,11 +707,16 @@ func TestTxnWithConflict(t *testing.T) { require.NoError(t, err) }() - tx := s.newTransaction(false) + tx, err := s.NewTransaction(ctx, false) + require.NoError(t, err) - tx2 := s.newTransaction(false) + tx2, err := s.NewTransaction(ctx, false) + require.NoError(t, err) - err := tx.Put(ctx, testKey3, testValue3) + _, err = tx.GetSize(ctx, testKey3) + require.ErrorIs(t, err, ds.ErrNotFound) + + err = tx.Put(ctx, testKey3, testValue3) require.NoError(t, err) err = tx2.Put(ctx, testKey3, testValue4) @@ -724,7 +729,7 @@ func TestTxnWithConflict(t *testing.T) { require.ErrorIs(t, err, ErrTxnConflict) } -func TestTxnWithConflictAfterDelete(t *testing.T) { +func TestTxnWithNoConflictAfterDelete(t *testing.T) { ctx := context.Background() s := newLoadedDatastore(ctx) defer func() { @@ -746,7 +751,7 @@ func TestTxnWithConflictAfterDelete(t *testing.T) { require.NoError(t, err) err = tx.Commit(ctx) - require.ErrorIs(t, err, ErrTxnConflict) + require.NoError(t, err) } func TestTxnWithConflictAfterGet(t *testing.T) { diff --git a/datastore/txn_test.go b/datastore/txn_test.go index 1a8623600f..b11ca3acfe 100644 --- a/datastore/txn_test.go +++ b/datastore/txn_test.go @@ -201,8 +201,7 @@ func TestShimTxnStoreClose(t *testing.T) { require.NoError(t, err) } -// This test documents https://github.com/sourcenetwork/defradb/issues/2673 -func TestMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConflict(t *testing.T) { +func TestMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldSucceed(t *testing.T) { ctx := context.Background() rootstore := 
memory.NewDatastore(ctx) @@ -223,7 +222,7 @@ func TestMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConflict(t require.NoError(t, err) err = txn1.Commit(ctx) - require.ErrorIs(t, err, badger.ErrConflict) + require.NoError(t, err) } func TestMemoryStoreTxn_TwoTransactionsWithGetPutConflict_ShouldErrorWithConflict(t *testing.T) { @@ -284,8 +283,7 @@ func TestMemoryStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithConflic require.ErrorIs(t, err, badger.ErrConflict) } -// This test documents https://github.com/sourcenetwork/defradb/issues/2673 -func TestBadgerMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConflict(t *testing.T) { +func TestBadgerMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldSucceed(t *testing.T) { ctx := context.Background() opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} rootstore, err := badgerds.NewDatastore("", &opts) @@ -308,9 +306,6 @@ func TestBadgerMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConf require.NoError(t, err) err = txn1.Commit(ctx) - // We are expecting this to fail because of the conflict but badger does not return an error. - // Conflicts in badger only occurs when the value of a key was changed between the time you read and you rewrite it. 
- // require.ErrorIs(t, err, badger.ErrConflict) require.NoError(t, err) } @@ -376,8 +371,7 @@ func TestBadgerMemoryStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithC require.ErrorIs(t, err, badger.ErrConflict) } -// This test documents https://github.com/sourcenetwork/defradb/issues/2673 -func TestBadgerFileStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConflict(t *testing.T) { +func TestBadgerFileStoreTxn_TwoTransactionsWithPutConflict_ShouldSucceed(t *testing.T) { ctx := context.Background() opts := badgerds.Options{Options: badger.DefaultOptions("")} rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts) @@ -400,9 +394,6 @@ func TestBadgerFileStoreTxn_TwoTransactionsWithPutConflict_ShouldErrorWithConfli require.NoError(t, err) err = txn1.Commit(ctx) - // We are expecting this to fail because of the conflict but badger does not return an error. - // Conflicts in badger only occurs when the value of a key was changed between the time you read and you rewrite it. - // require.ErrorIs(t, err, badger.ErrConflict) require.NoError(t, err) } From 07e431d399510d9501a9d498ce8e26a0650ae486 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Mon, 17 Jun 2024 10:40:03 -0700 Subject: [PATCH 49/78] fix: Merge retry logic (#2719) ## Relevant issue(s) Resolves #2718 Resolves #2721 ## Description This PR fixes issues with merge retry and DAG sync processes. It also moves the `docQueue` from the `net` package into the `db` package. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). 
- [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test` Specify the platform(s) on which this was tested: - MacOS --- events/dag_sync.go | 3 +- go.mod | 1 + go.sum | 4 ++ internal/db/merge.go | 124 +++++++++++++++++++++++--------------- internal/db/merge_test.go | 28 +++++++++ net/process.go | 124 -------------------------------------- net/server.go | 76 +++-------------------- net/server_test.go | 25 -------- net/sync_dag.go | 91 ++++++++++++++++++++++++++++ 9 files changed, 209 insertions(+), 267 deletions(-) delete mode 100644 net/process.go create mode 100644 net/sync_dag.go diff --git a/events/dag_sync.go b/events/dag_sync.go index 4ab568b7d0..d6150c9118 100644 --- a/events/dag_sync.go +++ b/events/dag_sync.go @@ -14,7 +14,6 @@ import ( "sync" "github.com/ipfs/go-cid" - "github.com/sourcenetwork/immutable" ) @@ -23,6 +22,8 @@ type DAGMergeChannel = immutable.Option[Channel[DAGMerge]] // DAGMerge is a notification that a merge can be performed up to the provided CID. type DAGMerge struct { + // DocID is the unique identifier for the document being merged. + DocID string // Cid is the id of the composite commit that formed this update in the DAG. Cid cid.Cid // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. 
diff --git a/go.mod b/go.mod index fc838114a6..de665e784f 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( github.com/ipfs/go-log/v2 v2.5.1 github.com/ipld/go-ipld-prime v0.21.0 github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20240322071758-198d7dba8fb8 + github.com/ipld/go-ipld-prime/storage/bsrvadapter v0.0.0-20240322071758-198d7dba8fb8 github.com/jbenet/goprocess v0.1.4 github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c github.com/lestrrat-go/jwx/v2 v2.0.21 diff --git a/go.sum b/go.sum index 23410e3394..eade42f4c3 100644 --- a/go.sum +++ b/go.sum @@ -636,6 +636,8 @@ github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20240322071758-198d7dba8fb8 h1:WQVfplCGOHtFNyZH7eOaEqGsbbje3NP8EFeGggUvEQs= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20240322071758-198d7dba8fb8/go.mod h1:PVDd/V/Zz9IW+Diz9LEhD+ZYS9pKzawmtVQhVd0hcgQ= +github.com/ipld/go-ipld-prime/storage/bsrvadapter v0.0.0-20240322071758-198d7dba8fb8 h1:adq3fTx2YXmpTPNvBRIM0Zi5lX4JjQTRjdLYKhXMkQg= +github.com/ipld/go-ipld-prime/storage/bsrvadapter v0.0.0-20240322071758-198d7dba8fb8/go.mod h1:ej/GTRX+HjlHMs/M3zg9fM8mUlQXgHqRvPJjtp+atHw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -1166,6 +1168,8 @@ github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vito/go-sse v1.0.0 h1:e6/iTrrvy8BRrOwJwmQmlndlil+TLdxXvHi55ZDzH6M= github.com/vito/go-sse v1.0.0/go.mod h1:2wkcaQ+jtlZ94Uve8gYZjFpL68luAjssTINA2hpgcZs= +github.com/warpfork/go-testmark v0.12.1 
h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/wasmerio/wasmer-go v1.0.4 h1:MnqHoOGfiQ8MMq2RF6wyCeebKOe84G88h5yv+vmxJgs= diff --git a/internal/db/merge.go b/internal/db/merge.go index 323f7ae92c..7618d31309 100644 --- a/internal/db/merge.go +++ b/internal/db/merge.go @@ -13,6 +13,7 @@ package db import ( "container/list" "context" + "sync" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime/linking" @@ -34,6 +35,7 @@ import ( ) func (db *db) handleMerges(ctx context.Context, merges events.Subscription[events.DAGMerge]) { + queue := newMergeQueue() for { select { case <-ctx.Done(): @@ -43,15 +45,33 @@ func (db *db) handleMerges(ctx context.Context, merges events.Subscription[event return } go func() { - err := db.executeMerge(ctx, merge) + // ensure only one merge per docID + queue.add(merge.DocID) + defer queue.done(merge.DocID) + + // retry the merge process if a conflict occurs + // + // conficts occur when a user updates a document + // while a merge is in progress. 
+ var err error + for i := 0; i < db.MaxTxnRetries(); i++ { + err = db.executeMerge(ctx, merge) + if errors.Is(err, badger.ErrTxnConflict) { + continue // retry merge + } + break // merge success or error + } + if err != nil { log.ErrorContextE( ctx, "Failed to execute merge", err, - corelog.String("CID", merge.Cid.String()), - corelog.String("Error", err.Error()), - ) + corelog.Any("Error", err), + corelog.Any("Event", merge)) + } + if merge.Wg != nil { + merge.Wg.Done() } }() } @@ -59,12 +79,6 @@ func (db *db) handleMerges(ctx context.Context, merges events.Subscription[event } func (db *db) executeMerge(ctx context.Context, dagMerge events.DAGMerge) error { - defer func() { - // Notify the caller that the merge is complete. - if dagMerge.Wg != nil { - dagMerge.Wg.Done() - } - }() ctx, txn, err := ensureContextTxn(ctx, db, false) if err != nil { return err @@ -79,7 +93,7 @@ func (db *db) executeMerge(ctx context.Context, dagMerge events.DAGMerge) error ls := cidlink.DefaultLinkSystem() ls.SetReadStorage(txn.DAGstore().AsIPLDStorage()) - docID, err := getDocIDFromBlock(ctx, ls, dagMerge.Cid) + docID, err := client.NewDocIDFromString(dagMerge.DocID) if err != nil { return err } @@ -100,35 +114,57 @@ func (db *db) executeMerge(ctx context.Context, dagMerge events.DAGMerge) error return err } - for retry := 0; retry < db.MaxTxnRetries(); retry++ { - err := mp.mergeComposites(ctx) - if err != nil { - return err - } - err = syncIndexedDoc(ctx, docID, col) - if err != nil { - return err - } - err = txn.Commit(ctx) - if err != nil { - if errors.Is(err, badger.ErrTxnConflict) { - txn, err = db.NewTxn(ctx, false) - if err != nil { - return err - } - ctx = SetContextTxn(ctx, txn) - mp.txn = txn - mp.lsys.SetReadStorage(txn.DAGstore().AsIPLDStorage()) - // Reset the CRDTs to avoid reusing the old transaction. 
- mp.mCRDTs = make(map[string]merklecrdt.MerkleCRDT) - continue - } - return err - } - break + err = mp.mergeComposites(ctx) + if err != nil { + return err } - return nil + err = syncIndexedDoc(ctx, docID, col) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +// mergeQueue is synchronization source to ensure that concurrent +// document merges do not cause transaction conflicts. +type mergeQueue struct { + docs map[string]chan struct{} + mutex sync.Mutex +} + +func newMergeQueue() *mergeQueue { + return &mergeQueue{ + docs: make(map[string]chan struct{}), + } +} + +// add adds a docID to the queue. If the docID is already in the queue, it will +// wait for the docID to be removed from the queue. For every add call, done must +// be called to remove the docID from the queue. Otherwise, subsequent add calls will +// block forever. +func (m *mergeQueue) add(docID string) { + m.mutex.Lock() + done, ok := m.docs[docID] + if !ok { + m.docs[docID] = make(chan struct{}) + } + m.mutex.Unlock() + if ok { + <-done + m.add(docID) + } +} + +func (m *mergeQueue) done(docID string) { + m.mutex.Lock() + defer m.mutex.Unlock() + done, ok := m.docs[docID] + if ok { + delete(m.docs, docID) + close(done) + } } type mergeProcessor struct { @@ -333,18 +369,6 @@ func (mp *mergeProcessor) initCRDTForType( return mcrdt, nil } -func getDocIDFromBlock(ctx context.Context, ls linking.LinkSystem, cid cid.Cid) (client.DocID, error) { - nd, err := ls.Load(linking.LinkContext{Ctx: ctx}, cidlink.Link{Cid: cid}, coreblock.SchemaPrototype) - if err != nil { - return client.DocID{}, err - } - block, err := coreblock.GetFromNode(nd) - if err != nil { - return client.DocID{}, err - } - return client.NewDocIDFromString(string(block.Delta.GetDocID())) -} - func getCollectionFromRootSchema(ctx context.Context, db *db, rootSchema string) (*collection, error) { cols, err := db.getCollections( ctx, diff --git a/internal/db/merge_test.go b/internal/db/merge_test.go index 
b8671a5171..9f0e0b34af 100644 --- a/internal/db/merge_test.go +++ b/internal/db/merge_test.go @@ -13,6 +13,7 @@ package db import ( "context" "testing" + "time" "github.com/fxamacker/cbor/v2" "github.com/ipld/go-ipld-prime" @@ -59,6 +60,7 @@ func TestMerge_SingleBranch_NoError(t *testing.T) { require.NoError(t, err) err = db.executeMerge(ctx, events.DAGMerge{ + DocID: docID.String(), Cid: compInfo2.link.Cid, SchemaRoot: col.SchemaRoot(), }) @@ -103,6 +105,7 @@ func TestMerge_DualBranch_NoError(t *testing.T) { require.NoError(t, err) err = db.executeMerge(ctx, events.DAGMerge{ + DocID: docID.String(), Cid: compInfo2.link.Cid, SchemaRoot: col.SchemaRoot(), }) @@ -112,6 +115,7 @@ func TestMerge_DualBranch_NoError(t *testing.T) { require.NoError(t, err) err = db.executeMerge(ctx, events.DAGMerge{ + DocID: docID.String(), Cid: compInfo3.link.Cid, SchemaRoot: col.SchemaRoot(), }) @@ -159,6 +163,7 @@ func TestMerge_DualBranchWithOneIncomplete_CouldNotFindCID(t *testing.T) { require.NoError(t, err) err = db.executeMerge(ctx, events.DAGMerge{ + DocID: docID.String(), Cid: compInfo2.link.Cid, SchemaRoot: col.SchemaRoot(), }) @@ -177,6 +182,7 @@ func TestMerge_DualBranchWithOneIncomplete_CouldNotFindCID(t *testing.T) { require.NoError(t, err) err = db.executeMerge(ctx, events.DAGMerge{ + DocID: docID.String(), Cid: compInfo3.link.Cid, SchemaRoot: col.SchemaRoot(), }) @@ -292,3 +298,25 @@ func encodeValue(val any) []byte { } return b } + +func TestMergeQueue(t *testing.T) { + q := newMergeQueue() + + testDocID := "test" + + q.add(testDocID) + go q.add(testDocID) + // give time for the goroutine to block + time.Sleep(10 * time.Millisecond) + require.Len(t, q.docs, 1) + q.done(testDocID) + // give time for the goroutine to add the docID + time.Sleep(10 * time.Millisecond) + q.mutex.Lock() + require.Len(t, q.docs, 1) + q.mutex.Unlock() + q.done(testDocID) + q.mutex.Lock() + require.Len(t, q.docs, 0) + q.mutex.Unlock() +} diff --git a/net/process.go b/net/process.go deleted file 
mode 100644 index b4f85134fb..0000000000 --- a/net/process.go +++ /dev/null @@ -1,124 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package net - -import ( - "context" - "sync" - "time" - - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime/linking" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/sourcenetwork/corelog" - - coreblock "github.com/sourcenetwork/defradb/internal/core/block" -) - -var ( - dagSyncTimeout = time.Second * 60 -) - -type blockProcessor struct { - *Peer - wg *sync.WaitGroup - bsSession *blockservice.Session - queuedChildren *sync.Map -} - -func newBlockProcessor( - ctx context.Context, - p *Peer, -) *blockProcessor { - return &blockProcessor{ - Peer: p, - wg: &sync.WaitGroup{}, - bsSession: blockservice.NewSession(ctx, p.bserv), - queuedChildren: &sync.Map{}, - } -} - -// processRemoteBlock stores the block in the DAG store and initiates a sync of the block's children. 
-func (bp *blockProcessor) processRemoteBlock( - ctx context.Context, - block *coreblock.Block, -) error { - // Store the block in the DAG store - lsys := cidlink.DefaultLinkSystem() - lsys.SetWriteStorage(bp.db.Blockstore().AsIPLDStorage()) - _, err := lsys.Store(linking.LinkContext{Ctx: ctx}, coreblock.GetLinkPrototype(), block.GenerateNode()) - if err != nil { - return err - } - // Initiate a sync of the block's children - bp.wg.Add(1) - bp.handleChildBlocks(ctx, block) - - return nil -} - -func (bp *blockProcessor) handleChildBlocks( - ctx context.Context, - block *coreblock.Block, -) { - defer bp.wg.Done() - - if len(block.Links) == 0 { - return - } - - links := make([]cid.Cid, 0, len(block.Links)) - for _, link := range block.Links { - exists, err := bp.db.Blockstore().Has(ctx, link.Cid) - if err != nil { - log.ErrorContextE( - ctx, - "Failed to check if block exists", - err, - corelog.Any("CID", link.Cid), - ) - continue - } - if exists { - continue - } - if _, loaded := bp.queuedChildren.LoadOrStore(link.Cid, struct{}{}); !loaded { - links = append(links, link.Cid) - } - } - - getCtx, cancel := context.WithTimeout(ctx, dagSyncTimeout) - defer cancel() - - childBlocks := bp.bsSession.GetBlocks(getCtx, links) - - for rawBlock := range childBlocks { - block, err := coreblock.GetFromBytes(rawBlock.RawData()) - if err != nil { - log.ErrorContextE( - ctx, - "Failed to get block from bytes", - err, - corelog.Any("CID", rawBlock.Cid()), - ) - continue - } - bp.wg.Add(1) - go bp.handleChildBlocks(ctx, block) - } - - for _, link := range links { - bp.queuedChildren.Delete(link) - } -} diff --git a/net/server.go b/net/server.go index 94d791854f..2d0fcdaaa3 100644 --- a/net/server.go +++ b/net/server.go @@ -17,7 +17,7 @@ import ( "fmt" "sync" - "github.com/ipfs/go-cid" + cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/corelog" @@ -51,11 +51,6 @@ type server struct { 
pubSubEmitter event.Emitter pushLogEmitter event.Emitter - // docQueue is used to track which documents are currently being processed. - // This is used to prevent multiple concurrent processing of the same document and - // limit unecessary transaction conflicts. - docQueue *docQueue - pb.UnimplementedServiceServer } @@ -73,9 +68,6 @@ func newServer(p *Peer, opts ...grpc.DialOption) (*server, error) { peer: p, conns: make(map[libpeer.ID]*grpc.ClientConn), topics: make(map[string]pubsubTopic), - docQueue: &docQueue{ - docs: make(map[string]chan struct{}), - }, } cred := insecure.NewCredentials() @@ -152,45 +144,13 @@ func (s *server) GetLog(ctx context.Context, req *pb.GetLogRequest) (*pb.GetLogR return nil, nil } -type docQueue struct { - docs map[string]chan struct{} - mu sync.Mutex -} - -// add adds a docID to the queue. If the docID is already in the queue, it will -// wait for the docID to be removed from the queue. For every add call, done must -// be called to remove the docID from the queue. Otherwise, subsequent add calls will -// block forever. 
-func (dq *docQueue) add(docID string) { - dq.mu.Lock() - done, ok := dq.docs[docID] - if !ok { - dq.docs[docID] = make(chan struct{}) - } - dq.mu.Unlock() - if ok { - <-done - dq.add(docID) - } -} - -func (dq *docQueue) done(docID string) { - dq.mu.Lock() - defer dq.mu.Unlock() - done, ok := dq.docs[docID] - if ok { - delete(dq.docs, docID) - close(done) - } -} - // PushLog receives a push log request func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushLogReply, error) { pid, err := peerIDFromContext(ctx) if err != nil { return nil, err } - cid, err := cid.Cast(req.Body.Cid) + headCID, err := cid.Cast(req.Body.Cid) if err != nil { return nil, err } @@ -198,10 +158,12 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if err != nil { return nil, err } + block, err := coreblock.GetFromBytes(req.Body.Log.Block) + if err != nil { + return nil, err + } - s.docQueue.add(docID.String()) defer func() { - s.docQueue.done(docID.String()) if s.pushLogEmitter != nil { byPeer, err := libpeer.Decode(req.Body.Creator) if err != nil { @@ -219,37 +181,17 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL } }() - // check if we already have this block - exists, err := s.peer.db.Blockstore().Has(ctx, cid) - if err != nil { - return nil, NewErrCheckingForExistingBlock(err, cid.String()) - } - if exists { - return &pb.PushLogReply{}, nil - } - - block, err := coreblock.GetFromBytes(req.Body.Log.Block) + err = syncDAG(ctx, s.peer.bserv, block) if err != nil { return nil, err } - bp := newBlockProcessor(ctx, s.peer) - err = bp.processRemoteBlock(ctx, block) - if err != nil { - log.ErrorContextE( - ctx, - "Failed to process remote block", - err, - corelog.String("DocID", docID.String()), - corelog.Any("CID", cid), - ) - } - bp.wg.Wait() if s.peer.db.Events().DAGMerges.HasValue() { wg := &sync.WaitGroup{} wg.Add(1) s.peer.db.Events().DAGMerges.Value().Publish(events.DAGMerge{ - Cid: cid, + DocID: 
docID.String(), + Cid: headCID, SchemaRoot: string(req.Body.SchemaRoot), Wg: wg, }) diff --git a/net/server_test.go b/net/server_test.go index 47d6a68aa8..d17705b404 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -13,7 +13,6 @@ package net import ( "context" "testing" - "time" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore/query" @@ -227,30 +226,6 @@ func TestGetHeadLog(t *testing.T) { require.Nil(t, err) } -func TestDocQueue(t *testing.T) { - q := docQueue{ - docs: make(map[string]chan struct{}), - } - - testDocID := "test" - - q.add(testDocID) - go q.add(testDocID) - // give time for the goroutine to block - time.Sleep(10 * time.Millisecond) - require.Len(t, q.docs, 1) - q.done(testDocID) - // give time for the goroutine to add the docID - time.Sleep(10 * time.Millisecond) - q.mu.Lock() - require.Len(t, q.docs, 1) - q.mu.Unlock() - q.done(testDocID) - q.mu.Lock() - require.Len(t, q.docs, 0) - q.mu.Unlock() -} - func getHead(ctx context.Context, db client.DB, docID client.DocID) (cid.Cid, error) { prefix := core.DataStoreKeyFromDocID(docID).ToHeadStoreKey().WithFieldId(core.COMPOSITE_NAMESPACE).ToString() results, err := db.Headstore().Query(ctx, query.Query{Prefix: prefix}) diff --git a/net/sync_dag.go b/net/sync_dag.go new file mode 100644 index 0000000000..6e9801ebd7 --- /dev/null +++ b/net/sync_dag.go @@ -0,0 +1,91 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package net + +import ( + "context" + "time" + + "github.com/ipfs/boxo/blockservice" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/datamodel" + "github.com/ipld/go-ipld-prime/linking" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/linking/preload" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/schema" + "github.com/ipld/go-ipld-prime/storage/bsrvadapter" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + + coreblock "github.com/sourcenetwork/defradb/internal/core/block" +) + +// syncDAGTimeout is the maximum amount of time +// to wait for a dag to be fetched. +var syncDAGTimeout = 60 * time.Second + +// syncDAG synchronizes the DAG starting with the given block +// using the blockservice to fetch remote blocks. +// +// This process walks the entire DAG until the issue below is resolved. 
+// https://github.com/sourcenetwork/defradb/issues/2722 +func syncDAG(ctx context.Context, bserv blockservice.BlockService, block *coreblock.Block) error { + ctx, cancel := context.WithTimeout(ctx, syncDAGTimeout) + defer cancel() + + store := &bsrvadapter.Adapter{Wrapped: bserv} + lsys := cidlink.DefaultLinkSystem() + lsys.SetWriteStorage(store) + lsys.SetReadStorage(store) + lsys.TrustedStorage = true + + // Store the block in the DAG store + _, err := lsys.Store(linking.LinkContext{Ctx: ctx}, coreblock.GetLinkPrototype(), block.GenerateNode()) + if err != nil { + return err + } + + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + matchAllSelector, err := ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreUnion( + ssb.Matcher(), + ssb.ExploreAll(ssb.ExploreRecursiveEdge()), + )).Selector() + if err != nil { + return err + } + + // prototypeChooser returns the node prototype to use when traversing + prototypeChooser := func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { + if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { + return tlnkNd.LinkTargetNodePrototype(), nil + } + return basicnode.Prototype.Any, nil + } + // preloader is used to asynchronously load blocks before traversing + // + // any errors encountered during preload are ignored + preloader := func(pctx preload.PreloadContext, l preload.Link) { + go lsys.Load(linking.LinkContext{Ctx: pctx.Ctx}, l.Link, basicnode.Prototype.Any) //nolint:errcheck + } + config := traversal.Config{ + Ctx: ctx, + LinkSystem: lsys, + LinkVisitOnlyOnce: true, + LinkTargetNodePrototypeChooser: prototypeChooser, + Preloader: preloader, + } + visit := func(p traversal.Progress, n datamodel.Node) error { + return nil + } + return traversal.Progress{Cfg: &config}.WalkMatching(block.GenerateNode(), matchAllSelector, visit) +} From 5c0adb63baf58838a7b0626f338ed73e622abbd7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" 
<41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 15:28:27 -0400 Subject: [PATCH 50/78] bot: Update dependencies (bulk dependabot PRs) 06-17-2024 (#2730) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by combining the following PRs: #2729 bot: Bump github.com/getkin/kin-openapi from 0.124.0 to 0.125.0 #2728 bot: Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 #2727 bot: Bump @typescript-eslint/eslint-plugin from 7.12.0 to 7.13.0 in /playground #2725 bot: Bump graphql from 16.8.1 to 16.8.2 in /playground #2724 bot: Bump vite from 5.2.13 to 5.3.1 in /playground ⚠️ The following PRs were resolved manually due to merge conflicts: #2726 bot: Bump @typescript-eslint/parser from 7.12.0 to 7.13.0 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- go.mod | 6 +- go.sum | 12 +- playground/package-lock.json | 517 ++++++++++++++++++++++++++--------- playground/package.json | 8 +- 4 files changed, 399 insertions(+), 144 deletions(-) diff --git a/go.mod b/go.mod index de665e784f..b4d0086c1a 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/evanphx/json-patch/v5 v5.9.0 github.com/fxamacker/cbor/v2 v2.6.0 - github.com/getkin/kin-openapi v0.124.0 + github.com/getkin/kin-openapi v0.125.0 github.com/go-chi/chi/v5 v5.0.12 github.com/go-chi/cors v1.2.1 github.com/go-errors/errors v1.5.1 @@ -43,7 +43,7 @@ require ( github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 github.com/sourcenetwork/sourcehub v0.2.1-0.20240305165631-9b75b1000724 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper 
v1.19.0 github.com/stretchr/testify v1.9.0 @@ -108,7 +108,7 @@ require ( github.com/cosmos/iavl v1.1.2 // indirect github.com/cosmos/ics23/go v0.10.0 // indirect github.com/cosmos/ledger-cosmos-go v0.13.3 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect diff --git a/go.sum b/go.sum index eade42f4c3..98241458ed 100644 --- a/go.sum +++ b/go.sum @@ -214,8 +214,8 @@ github.com/cosmos/ledger-cosmos-go v0.13.3/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5X github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -310,8 +310,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 
v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/getkin/kin-openapi v0.124.0 h1:VSFNMB9C9rTKBnQ/fpyDU8ytMTr4dWI9QovSKj9kz/M= -github.com/getkin/kin-openapi v0.124.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= +github.com/getkin/kin-openapi v0.125.0 h1:jyQCyf2qXS1qvs2U00xQzkGCqYPhEhZDmSmVt65fXno= +github.com/getkin/kin-openapi v0.125.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -1098,8 +1098,8 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= diff --git a/playground/package-lock.json b/playground/package-lock.json index 216c6f78c9..d6f8c2297d 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -9,7 +9,7 @@ "version": "0.0.0", "dependencies": { "graphiql": "^3.2.3", - "graphql": "^16.8.1", + "graphql": "^16.8.2", "react": "^18.3.1", "react-dom": "^18.3.1", 
"swagger-ui-react": "^5.17.14" @@ -18,14 +18,14 @@ "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.12.0", - "@typescript-eslint/parser": "^7.12.0", + "@typescript-eslint/eslint-plugin": "^7.13.0", + "@typescript-eslint/parser": "^7.13.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.13" + "vite": "^5.3.1" } }, "node_modules/@babel/runtime": { @@ -103,9 +103,9 @@ "optional": true }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz", - "integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", "cpu": [ "ppc64" ], @@ -119,9 +119,9 @@ } }, "node_modules/@esbuild/android-arm": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz", - "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", "cpu": [ "arm" ], @@ -135,9 +135,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz", - "integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==", + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", "cpu": [ "arm64" ], @@ -151,9 +151,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz", - "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", "cpu": [ "x64" ], @@ -167,9 +167,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz", - "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", "cpu": [ "arm64" ], @@ -183,9 +183,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz", - "integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", "cpu": [ "x64" ], @@ -199,9 +199,9 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.20.2", - "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz", - "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", "cpu": [ "arm64" ], @@ -215,9 +215,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz", - "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", "cpu": [ "x64" ], @@ -231,9 +231,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz", - "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", "cpu": [ "arm" ], @@ -247,9 +247,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz", - "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": 
"sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", "cpu": [ "arm64" ], @@ -263,9 +263,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz", - "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", "cpu": [ "ia32" ], @@ -279,9 +279,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz", - "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", "cpu": [ "loong64" ], @@ -295,9 +295,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz", - "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", "cpu": [ "mips64el" ], @@ -311,9 +311,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz", - "integrity": 
"sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", "cpu": [ "ppc64" ], @@ -327,9 +327,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz", - "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", "cpu": [ "riscv64" ], @@ -343,9 +343,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz", - "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", "cpu": [ "s390x" ], @@ -359,9 +359,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz", - "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", "cpu": [ "x64" ], @@ -375,9 +375,9 @@ } }, 
"node_modules/@esbuild/netbsd-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz", - "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", "cpu": [ "x64" ], @@ -391,9 +391,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz", - "integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", "cpu": [ "x64" ], @@ -407,9 +407,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz", - "integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", "cpu": [ "x64" ], @@ -423,9 +423,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz", - "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": 
"sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", "cpu": [ "arm64" ], @@ -439,9 +439,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz", - "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", "cpu": [ "ia32" ], @@ -455,9 +455,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz", - "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", "cpu": [ "x64" ], @@ -2429,16 +2429,16 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.12.0.tgz", - "integrity": "sha512-7F91fcbuDf/d3S8o21+r3ZncGIke/+eWk0EpO21LXhDfLahriZF9CGj4fbAetEjlaBdjdSm9a6VeXbpbT6Z40Q==", + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.13.0.tgz", + "integrity": "sha512-FX1X6AF0w8MdVFLSdqwqN/me2hyhuQg4ykN6ZpVhh1ij/80pTvDKclX1sZB9iqex8SjQfVhwMKs3JtnnMLzG9w==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.12.0", - 
"@typescript-eslint/type-utils": "7.12.0", - "@typescript-eslint/utils": "7.12.0", - "@typescript-eslint/visitor-keys": "7.12.0", + "@typescript-eslint/scope-manager": "7.13.0", + "@typescript-eslint/type-utils": "7.13.0", + "@typescript-eslint/utils": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2461,16 +2461,63 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.0.tgz", + "integrity": "sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", + "integrity": "sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", + "integrity": "sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + 
"node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/parser": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.12.0.tgz", - "integrity": "sha512-dm/J2UDY3oV3TKius2OUZIFHsomQmpHtsV0FTh1WO8EKgHLQ1QCADUqscPgTpU+ih1e21FQSRjXckHn3txn6kQ==", + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.13.0.tgz", + "integrity": "sha512-EjMfl69KOS9awXXe83iRN7oIEXy9yYdqWfqdrFAYAAr6syP8eLEFI7ZE4939antx2mNgPRW/o1ybm2SFYkbTVA==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "7.12.0", - "@typescript-eslint/types": "7.12.0", - "@typescript-eslint/typescript-estree": "7.12.0", - "@typescript-eslint/visitor-keys": "7.12.0", + "@typescript-eslint/scope-manager": "7.13.0", + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/typescript-estree": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0", "debug": "^4.3.4" }, "engines": { @@ -2489,6 +2536,81 @@ } } }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.0.tgz", + "integrity": "sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", + "integrity": 
"sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.13.0.tgz", + "integrity": "sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", + "integrity": "sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/scope-manager": { "version": "7.12.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.12.0.tgz", @@ -2507,13 +2629,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": 
"7.12.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.12.0.tgz", - "integrity": "sha512-lib96tyRtMhLxwauDWUp/uW3FMhLA6D0rJ8T7HmH7x23Gk1Gwwu8UZ94NMXBvOELn6flSPiBrCKlehkiXyaqwA==", + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.13.0.tgz", + "integrity": "sha512-xMEtMzxq9eRkZy48XuxlBFzpVMDurUAfDu5Rz16GouAtXm0TaAoTFzqWUFPPuQYXI/CDaH/Bgx/fk/84t/Bc9A==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "7.12.0", - "@typescript-eslint/utils": "7.12.0", + "@typescript-eslint/typescript-estree": "7.13.0", + "@typescript-eslint/utils": "7.13.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2533,6 +2655,64 @@ } } }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", + "integrity": "sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.13.0.tgz", + "integrity": "sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", + "integrity": "sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/types": { "version": "7.12.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.12.0.tgz", @@ -2575,15 +2755,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.12.0.tgz", - "integrity": "sha512-Y6hhwxwDx41HNpjuYswYp6gDbkiZ8Hin9Bf5aJQn1bpTs3afYY4GX+MPYxma8jtoIV2GRwTM/UJm/2uGCVv+DQ==", + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.13.0.tgz", + "integrity": "sha512-jceD8RgdKORVnB4Y6BqasfIkFhl4pajB1wVxrF4akxD2QPM8GNYjgGwEzYS+437ewlqqrg7Dw+6dhdpjMpeBFQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.12.0", - "@typescript-eslint/types": "7.12.0", - "@typescript-eslint/typescript-estree": "7.12.0" + "@typescript-eslint/scope-manager": "7.13.0", + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/typescript-estree": "7.13.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2596,6 +2776,81 @@ "eslint": "^8.56.0" } }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { + "version": "7.13.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.0.tgz", + "integrity": "sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", + "integrity": "sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", + "dev": true, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.13.0.tgz", + "integrity": "sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "@typescript-eslint/visitor-keys": "7.13.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.13.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", + "integrity": "sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "7.13.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/visitor-keys": { "version": "7.12.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.12.0.tgz", @@ -3209,9 +3464,9 @@ } }, "node_modules/esbuild": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz", - "integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==", + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", "dev": true, "hasInstallScript": true, "bin": { @@ -3221,29 +3476,29 @@ "node": ">=12" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.20.2", - "@esbuild/android-arm": "0.20.2", - "@esbuild/android-arm64": "0.20.2", - "@esbuild/android-x64": "0.20.2", - "@esbuild/darwin-arm64": "0.20.2", - "@esbuild/darwin-x64": "0.20.2", - "@esbuild/freebsd-arm64": "0.20.2", - "@esbuild/freebsd-x64": "0.20.2", - "@esbuild/linux-arm": "0.20.2", - "@esbuild/linux-arm64": "0.20.2", - "@esbuild/linux-ia32": "0.20.2", - "@esbuild/linux-loong64": "0.20.2", - "@esbuild/linux-mips64el": "0.20.2", - "@esbuild/linux-ppc64": "0.20.2", - "@esbuild/linux-riscv64": "0.20.2", - "@esbuild/linux-s390x": "0.20.2", - "@esbuild/linux-x64": "0.20.2", - "@esbuild/netbsd-x64": "0.20.2", - "@esbuild/openbsd-x64": "0.20.2", - "@esbuild/sunos-x64": "0.20.2", - "@esbuild/win32-arm64": "0.20.2", - 
"@esbuild/win32-ia32": "0.20.2", - "@esbuild/win32-x64": "0.20.2" + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" } }, "node_modules/escape-string-regexp": { @@ -3841,9 +4096,9 @@ } }, "node_modules/graphql": { - "version": "16.8.1", - "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz", - "integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==", + "version": "16.8.2", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.2.tgz", + "integrity": "sha512-cvVIBILwuoSyD54U4cF/UXDh5yAobhNV/tPygI4lZhgOIJQE/WLWC4waBRb4I6bDVYb3OVx3lfHbaQOEoUD5sg==", "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } @@ -5939,12 +6194,12 @@ "optional": true }, "node_modules/vite": { - "version": "5.2.13", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.13.tgz", - "integrity": "sha512-SSq1noJfY9pR3I1TUENL3rQYDQCFqgD+lM6fTRAM8Nv6Lsg5hDLaXkjETVeBt+7vZBCMoibD+6IWnT2mJ+Zb/A==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.3.1.tgz", + "integrity": "sha512-XBmSKRLXLxiaPYamLv3/hnP/KXDai1NDexN0FpkTaZXTfycHvkRHoenpgl/fvuK/kPbB6xAgoyiryAhQNxYmAQ==", "dev": true, "dependencies": { - "esbuild": 
"^0.20.1", + "esbuild": "^0.21.3", "postcss": "^8.4.38", "rollup": "^4.13.0" }, diff --git a/playground/package.json b/playground/package.json index 9eb354177e..afd65bf37d 100644 --- a/playground/package.json +++ b/playground/package.json @@ -11,7 +11,7 @@ }, "dependencies": { "graphiql": "^3.2.3", - "graphql": "^16.8.1", + "graphql": "^16.8.2", "react": "^18.3.1", "react-dom": "^18.3.1", "swagger-ui-react": "^5.17.14" @@ -20,13 +20,13 @@ "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.12.0", - "@typescript-eslint/parser": "^7.12.0", + "@typescript-eslint/eslint-plugin": "^7.13.0", + "@typescript-eslint/parser": "^7.13.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", "typescript": "^5.4.5", - "vite": "^5.2.13" + "vite": "^5.3.1" } } From 72734a319393e6eaa94a91a699fde45a2f459cc5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 16:43:29 -0400 Subject: [PATCH 51/78] bot: Bump @typescript-eslint/eslint-plugin from 7.13.0 to 7.13.1 in /playground (#2734) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/eslint-plugin](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/eslint-plugin) from 7.13.0 to 7.13.1.
Release notes

Sourced from @​typescript-eslint/eslint-plugin's releases.

v7.13.1

7.13.1 (2024-06-17)

🩹 Fixes

  • eslint-plugin: [prefer-readonly] refine report locations (#8894)
  • eslint-plugin: [return-await] support explicit resource management (#9044)
  • eslint-plugin: [no-unsafe-member-access] differentiate a types-error any from a true any (#9291)

❤️ Thank You

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @​typescript-eslint/eslint-plugin's changelog.

7.13.1 (2024-06-17)

🩹 Fixes

  • eslint-plugin: [prefer-readonly] refine report locations

  • eslint-plugin: [return-await] support explicit resource management

  • eslint-plugin: [no-unsafe-member-access] differentiate a types-error any from a true any

❤️ Thank You

  • Kirk Waiblinger
  • Yukihiro Hasegawa

You can read about our versioning strategy and releases on our website.

Commits
  • 1212a8f chore(release): publish 7.13.1
  • af34737 docs: improve return-await description about motivation (#9201)
  • bf48dc5 fix(eslint-plugin): [no-unsafe-member-access] differentiate a types-error any...
  • 33acc50 fix(eslint-plugin): [return-await] support explicit resource management (#9044)
  • 50ed604 fix(eslint-plugin): [prefer-readonly] refine report locations (#8894)
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/eslint-plugin&package-manager=npm_and_yarn&previous-version=7.13.0&new-version=7.13.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 252 +++++------------------------------ playground/package.json | 2 +- 2 files changed, 37 insertions(+), 217 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index d6f8c2297d..f54faee369 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -18,7 +18,7 @@ "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.13.0", + "@typescript-eslint/eslint-plugin": "^7.13.1", "@typescript-eslint/parser": "^7.13.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", @@ -2429,16 +2429,16 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.13.0.tgz", - "integrity": "sha512-FX1X6AF0w8MdVFLSdqwqN/me2hyhuQg4ykN6ZpVhh1ij/80pTvDKclX1sZB9iqex8SjQfVhwMKs3JtnnMLzG9w==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.13.1.tgz", + "integrity": "sha512-kZqi+WZQaZfPKnsflLJQCz6Ze9FFSMfXrrIOcyargekQxG37ES7DJNpJUE9Q/X5n3yTIP/WPutVNzgknQ7biLg==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.13.0", - "@typescript-eslint/type-utils": "7.13.0", - "@typescript-eslint/utils": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0", + "@typescript-eslint/scope-manager": "7.13.1", + "@typescript-eslint/type-utils": "7.13.1", + "@typescript-eslint/utils": "7.13.1", + "@typescript-eslint/visitor-keys": "7.13.1", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2461,53 +2461,6 @@ } } }, - 
"node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.0.tgz", - "integrity": "sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", - "integrity": "sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", - "integrity": "sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/parser": { "version": "7.13.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.13.0.tgz", @@ -2612,13 +2565,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "7.12.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.12.0.tgz", - "integrity": "sha512-itF1pTnN6F3unPak+kutH9raIkL3lhH1YRPGgt7QQOh43DQKVJXmWkpb+vpc/TiDHs6RSd9CTbDsc/Y+Ygq7kg==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.1.tgz", + "integrity": "sha512-adbXNVEs6GmbzaCpymHQ0MB6E4TqoiVbC0iqG3uijR8ZYfpAXMGttouQzF4Oat3P2GxDVIrg7bMI/P65LiQZdg==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.12.0", - "@typescript-eslint/visitor-keys": "7.12.0" + "@typescript-eslint/types": "7.13.1", + "@typescript-eslint/visitor-keys": "7.13.1" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2629,13 +2582,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.13.0.tgz", - "integrity": "sha512-xMEtMzxq9eRkZy48XuxlBFzpVMDurUAfDu5Rz16GouAtXm0TaAoTFzqWUFPPuQYXI/CDaH/Bgx/fk/84t/Bc9A==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.13.1.tgz", + "integrity": "sha512-aWDbLu1s9bmgPGXSzNCxELu+0+HQOapV/y+60gPXafR8e2g1Bifxzevaa+4L2ytCWm+CHqpELq4CSoN9ELiwCg==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "7.13.0", - "@typescript-eslint/utils": "7.13.0", + "@typescript-eslint/typescript-estree": "7.13.1", + "@typescript-eslint/utils": "7.13.1", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2655,68 +2608,10 @@ } } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", - "integrity": "sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.13.0.tgz", - "integrity": "sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", - "integrity": "sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/types": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.12.0.tgz", - "integrity": "sha512-o+0Te6eWp2ppKY3mLCU+YA9pVJxhUJE15FV7kxuD9jgwIAa+w/ycGJBMrYDTpVGUM/tgpa9SeMOugSabWFq7bg==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.1.tgz", + "integrity": 
"sha512-7K7HMcSQIAND6RBL4kDl24sG/xKM13cA85dc7JnmQXw2cBDngg7c19B++JzvJHRG3zG36n9j1i451GBzRuHchw==", "dev": true, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2727,13 +2622,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.12.0.tgz", - "integrity": "sha512-5bwqLsWBULv1h6pn7cMW5dXX/Y2amRqLaKqsASVwbBHMZSnHqE/HN4vT4fE0aFsiwxYvr98kqOWh1a8ZKXalCQ==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.13.1.tgz", + "integrity": "sha512-uxNr51CMV7npU1BxZzYjoVz9iyjckBduFBP0S5sLlh1tXYzHzgZ3BR9SVsNed+LmwKrmnqN3Kdl5t7eZ5TS1Yw==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.12.0", - "@typescript-eslint/visitor-keys": "7.12.0", + "@typescript-eslint/types": "7.13.1", + "@typescript-eslint/visitor-keys": "7.13.1", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -2755,15 +2650,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.13.0.tgz", - "integrity": "sha512-jceD8RgdKORVnB4Y6BqasfIkFhl4pajB1wVxrF4akxD2QPM8GNYjgGwEzYS+437ewlqqrg7Dw+6dhdpjMpeBFQ==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.13.1.tgz", + "integrity": "sha512-h5MzFBD5a/Gh/fvNdp9pTfqJAbuQC4sCN2WzuXme71lqFJsZtLbjxfSk4r3p02WIArOF9N94pdsLiGutpDbrXQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.13.0", - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/typescript-estree": "7.13.0" + "@typescript-eslint/scope-manager": "7.13.1", + "@typescript-eslint/types": "7.13.1", + "@typescript-eslint/typescript-estree": "7.13.1" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -2776,88 +2671,13 @@ "eslint": "^8.56.0" } }, - 
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.0.tgz", - "integrity": "sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", - "integrity": "sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.13.0.tgz", - "integrity": "sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - 
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", - "integrity": "sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.12.0.tgz", - "integrity": "sha512-uZk7DevrQLL3vSnfFl5bj4sL75qC9D6EdjemIdbtkuUmIheWpuiiylSY01JxJE7+zGrOWDZrp1WxOuDntvKrHQ==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.1.tgz", + "integrity": "sha512-k/Bfne7lrP7hcb7m9zSsgcBmo+8eicqqfNAJ7uUY+jkTFpKeH2FSkWpFRtimBxgkyvqfu9jTPRbYOvud6isdXA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.12.0", + "@typescript-eslint/types": "7.13.1", "eslint-visitor-keys": "^3.4.3" }, "engines": { diff --git a/playground/package.json b/playground/package.json index afd65bf37d..77a31c460c 100644 --- a/playground/package.json +++ b/playground/package.json @@ -20,7 +20,7 @@ "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", - "@typescript-eslint/eslint-plugin": "^7.13.0", + "@typescript-eslint/eslint-plugin": "^7.13.1", "@typescript-eslint/parser": "^7.13.0", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", From 38f9b9f325ba18de234d46756e64ad1fdb7658df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 18:25:04 -0400 Subject: [PATCH 52/78] bot: Bump 
@typescript-eslint/parser from 7.13.0 to 7.13.1 in /playground (#2733) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/parser](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/parser) from 7.13.0 to 7.13.1.
Release notes

Sourced from @​typescript-eslint/parser's releases.

v7.13.1

7.13.1 (2024-06-17)

🩹 Fixes

  • eslint-plugin: [prefer-readonly] refine report locations (#8894)
  • eslint-plugin: [return-await] support explicit resource management (#9044)
  • eslint-plugin: [no-unsafe-member-access] differentiate a types-error any from a true any (#9291)

❤️ Thank You

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @​typescript-eslint/parser's changelog.

7.13.1 (2024-06-17)

This was a version bump only for parser to align it with other projects, there were no code changes.

You can read about our versioning strategy and releases on our website.

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/parser&package-manager=npm_and_yarn&previous-version=7.13.0&new-version=7.13.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 91 ++++-------------------------------- playground/package.json | 2 +- 2 files changed, 9 insertions(+), 84 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index f54faee369..d7626f6caa 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -19,7 +19,7 @@ "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.13.1", - "@typescript-eslint/parser": "^7.13.0", + "@typescript-eslint/parser": "^7.13.1", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", @@ -2462,15 +2462,15 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.13.0.tgz", - "integrity": "sha512-EjMfl69KOS9awXXe83iRN7oIEXy9yYdqWfqdrFAYAAr6syP8eLEFI7ZE4939antx2mNgPRW/o1ybm2SFYkbTVA==", + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.13.1.tgz", + "integrity": "sha512-1ELDPlnLvDQ5ybTSrMhRTFDfOQEOXNM+eP+3HT/Yq7ruWpciQw+Avi73pdEbA4SooCawEWo3dtYbF68gN7Ed1A==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "7.13.0", - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/typescript-estree": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0", + "@typescript-eslint/scope-manager": "7.13.1", + "@typescript-eslint/types": "7.13.1", + "@typescript-eslint/typescript-estree": "7.13.1", + "@typescript-eslint/visitor-keys": "7.13.1", "debug": "^4.3.4" }, "engines": { @@ -2489,81 +2489,6 @@ } } }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.0.tgz", - "integrity": 
"sha512-ZrMCe1R6a01T94ilV13egvcnvVJ1pxShkE0+NDjDzH4nvG1wXpwsVI5bZCvE7AEDH1mXEx5tJSVR68bLgG7Dng==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.13.0.tgz", - "integrity": "sha512-QWuwm9wcGMAuTsxP+qz6LBBd3Uq8I5Nv8xb0mk54jmNoCyDspnMvVsOxI6IsMmway5d1S9Su2+sCKv1st2l6eA==", - "dev": true, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.13.0.tgz", - "integrity": "sha512-cAvBvUoobaoIcoqox1YatXOnSl3gx92rCZoMRPzMNisDiM12siGilSM4+dJAekuuHTibI2hVC2fYK79iSFvWjw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "@typescript-eslint/visitor-keys": "7.13.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.13.0.tgz", - "integrity": 
"sha512-nxn+dozQx+MK61nn/JP+M4eCkHDSxSLDpgE3WcQo0+fkjEolnaB5jswvIKC4K56By8MMgIho7f1PVxERHEo8rw==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "7.13.0", - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^18.18.0 || >=20.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/scope-manager": { "version": "7.13.1", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.13.1.tgz", diff --git a/playground/package.json b/playground/package.json index 77a31c460c..9b9638cde3 100644 --- a/playground/package.json +++ b/playground/package.json @@ -21,7 +21,7 @@ "@types/react-dom": "^18.3.0", "@types/swagger-ui-react": "^4.18.3", "@typescript-eslint/eslint-plugin": "^7.13.1", - "@typescript-eslint/parser": "^7.13.0", + "@typescript-eslint/parser": "^7.13.1", "@vitejs/plugin-react-swc": "^3.7.0", "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", From 3c0a14ad13aa504f7f46cca3cb78267ff3ff9ce6 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 18 Jun 2024 09:43:47 -0400 Subject: [PATCH 53/78] ci: Cache dependencies to speed up test runs (#2732) ## Relevant issue(s) Resolves #2735 ## Description In this PR we cache go and rust build/dep states, and restore them if an action run has same dependency graph. Go cache is around 877 MB and cargo/rust cache is about 189MB (99MB for macos) so we can roughly have room for 5 caches (x1 for linux and x1 for macos) before a cache is dropped / lost. Notes: - Github gives us 10GB of cache storage by default. - Rather than going with the standard practice of using lockfiles to generate the caching key we will use the mod and toml files, in order to maximize cache hits. Incase troublesome we can go back to lockfiles. - This change does not help the change detector. IMO change detector should move to a separate job, or a new workflow file. 
(opened https://github.com/sourcenetwork/defradb/issues/2736) ### Demo - Without cache run (current state): https://github.com/sourcenetwork/defradb/actions/runs/9554784255?pr=2732 - With cache run (after this PR): https://github.com/sourcenetwork/defradb/actions/runs/9555174699 ## How has this been tested? - Manual - `act` tool --- .../workflows/test-and-upload-coverage.yml | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index f20860bb7c..64d05cfb87 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -110,6 +110,45 @@ jobs: go-version: "1.21" check-latest: true + - name: Set cache paths + id: cache-paths + shell: bash + run: | + echo "GO_CACHE=$(go env GOCACHE)" >> "${GITHUB_OUTPUT}" + echo "GO_MODCACHE=$(go env GOMODCACHE)" >> "${GITHUB_OUTPUT}" + echo "CARGO_CACHE=~/.cargo" >> "${GITHUB_OUTPUT}" + + - name: Go cache/restore + uses: actions/cache@v4 + with: + key: ${{ runner.os }}-go-${{ hashFiles('**/go.mod') }} + path: | + ${{ steps.cache-paths.outputs.GO_CACHE }} + ${{ steps.cache-paths.outputs.GO_MODCACHE }} + + - name: Cargo cache/restore + # A very cool post: https://blog.arriven.wtf/posts/rust-ci-cache + uses: actions/cache@v4 + with: + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.toml') }} + # Here are some directories we shouldn't forget about: + # ~/.cargo/.* + # ~/.cargo/bin/ + # ~/.cargo/git/db/ + # ~/.cargo/registry/cache/ + # ~/.cargo/registry/index/ + # **/target/*/*.d + # **/target/*/*.rlib + # **/target/*/.fingerprint + # **/target/*/build + # **/target/*/deps + path: | + ${{ steps.cache-paths.outputs.CARGO_CACHE }} + **/target/ + + - name: Restore modified time + uses: chetan/git-restore-mtime-action@v2 + - name: Build dependencies run: | make deps:modules From 9f50a73fb87277aaaccf1e5f4579850353ddf5cf Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Tue, 18 
Jun 2024 12:49:31 -0700 Subject: [PATCH 54/78] fix(i): Convert error logs to `ErrorE` func (#2738) ## Relevant issue(s) Resolves #2737 ## Description This PR bumps corelog to v0.0.8 and converts error logs to `ErrorE` or `ErrorContextE`. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test` Specify the platform(s) on which this was tested: - MacOS --- go.mod | 2 +- go.sum | 4 ++-- internal/db/merge.go | 1 - net/node.go | 2 +- net/peer.go | 8 +++----- net/server.go | 10 +++++----- 6 files changed, 12 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index b4d0086c1a..b88e71219a 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 - github.com/sourcenetwork/corelog v0.0.7 + github.com/sourcenetwork/corelog v0.0.8 github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.14 github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 diff --git a/go.sum b/go.sum index 98241458ed..851e498e63 100644 --- a/go.sum +++ b/go.sum @@ -1073,8 +1073,8 @@ github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIK github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/sourcenetwork/badger/v4 
v4.2.1-0.20231113215945-a63444ca5276 h1:TpQDDPfucDgCNH0NVqVUk6SSq6T6G8p9HIocmwZh9Tg= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276/go.mod h1:lxiZTDBw0vheFMqSwX2OvB6RTDI1+/UtVCSU4rpThFM= -github.com/sourcenetwork/corelog v0.0.7 h1:vztssVAUDcsYN5VUOW3PKYhLprHfzoc8UbKewQuD1qw= -github.com/sourcenetwork/corelog v0.0.7/go.mod h1:cMabHgs3kARgYTQeQYSOmaGGP8XMU6sZrHd8LFrL3zA= +github.com/sourcenetwork/corelog v0.0.8 h1:jCo0mFBpWrfhUCGzzN3uUtPGyQv3jnITdPO1s2ME3RY= +github.com/sourcenetwork/corelog v0.0.8/go.mod h1:cMabHgs3kARgYTQeQYSOmaGGP8XMU6sZrHd8LFrL3zA= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.14 h1:620zKV4rOn7U5j/WsPkk4SFj0z9/pVV4bBx0BpZQgro= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.14/go.mod h1:jUoQv592uUX1u7QBjAY4C+l24X9ArhPfifOqXpDHz4U= github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd h1:lmpW39/8wPJ0khWRhOcj7Bj0HYKbSmQ8rXMJw1cMB8U= diff --git a/internal/db/merge.go b/internal/db/merge.go index 7618d31309..7f78deb77e 100644 --- a/internal/db/merge.go +++ b/internal/db/merge.go @@ -67,7 +67,6 @@ func (db *db) handleMerges(ctx context.Context, merges events.Subscription[event ctx, "Failed to execute merge", err, - corelog.Any("Error", err), corelog.Any("Event", merge)) } if merge.Wg != nil { diff --git a/net/node.go b/net/node.go index 1fa8da6316..70fd12c016 100644 --- a/net/node.go +++ b/net/node.go @@ -226,7 +226,7 @@ func (n *Node) Bootstrap(addrs []peer.AddrInfo) { defer wg.Done() err := n.host.Connect(n.ctx, pinfo) if err != nil { - log.InfoContext(n.ctx, "Cannot connect to peer", corelog.Any("Error", err)) + log.ErrorContextE(n.ctx, "Cannot connect to peer", err) return } log.InfoContext(n.ctx, "Connected", corelog.Any("PeerID", pinfo.ID)) diff --git a/net/peer.go b/net/peer.go index fc49aec7ec..a6ca285e2c 100644 --- a/net/peer.go +++ b/net/peer.go @@ -130,9 +130,7 @@ func (p *Peer) Start() error { log.InfoContext( p.ctx, "Failure while reconnecting to a known peer", - corelog.Any("peer", 
id), - corelog.Any("error", err), - ) + corelog.Any("peer", id)) } }(id) } @@ -193,12 +191,12 @@ func (p *Peer) Close() { // close event emitters if p.server.pubSubEmitter != nil { if err := p.server.pubSubEmitter.Close(); err != nil { - log.InfoContext(p.ctx, "Could not close pubsub event emitter", corelog.Any("Error", err.Error())) + log.ErrorContextE(p.ctx, "Could not close pubsub event emitter", err) } } if p.server.pushLogEmitter != nil { if err := p.server.pushLogEmitter.Close(); err != nil { - log.InfoContext(p.ctx, "Could not close push log event emitter", corelog.Any("Error", err.Error())) + log.ErrorContextE(p.ctx, "Could not close push log event emitter", err) } } diff --git a/net/server.go b/net/server.go index 2d0fcdaaa3..0eb0d27058 100644 --- a/net/server.go +++ b/net/server.go @@ -113,11 +113,11 @@ func newServer(p *Peer, opts ...grpc.DialOption) (*server, error) { var err error s.pubSubEmitter, err = s.peer.host.EventBus().Emitter(new(EvtPubSub)) if err != nil { - log.InfoContext(s.peer.ctx, "could not create event emitter", corelog.String("Error", err.Error())) + log.ErrorContextE(s.peer.ctx, "could not create event emitter", err) } s.pushLogEmitter, err = s.peer.host.EventBus().Emitter(new(EvtReceivedPushLog)) if err != nil { - log.InfoContext(s.peer.ctx, "could not create event emitter", corelog.String("Error", err.Error())) + log.ErrorContextE(s.peer.ctx, "could not create event emitter", err) } return s, nil @@ -167,7 +167,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if s.pushLogEmitter != nil { byPeer, err := libpeer.Decode(req.Body.Creator) if err != nil { - log.InfoContext(ctx, "could not decode the PeerID of the log creator", corelog.String("Error", err.Error())) + log.ErrorContextE(ctx, "could not decode the PeerID of the log creator", err) } err = s.pushLogEmitter.Emit(EvtReceivedPushLog{ FromPeer: pid, @@ -176,7 +176,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) 
(*pb.PushL if err != nil { // logging instead of returning an error because the event bus should // not break the PushLog execution. - log.InfoContext(ctx, "could not emit push log event", corelog.String("Error", err.Error())) + log.ErrorContextE(ctx, "could not emit push log event", err) } } }() @@ -349,7 +349,7 @@ func (s *server) pubSubEventHandler(from libpeer.ID, topic string, msg []byte) { Peer: from, }) if err != nil { - log.InfoContext(s.peer.ctx, "could not emit pubsub event", corelog.Any("Error", err.Error())) + log.ErrorContextE(s.peer.ctx, "could not emit pubsub event", err) } } } From 6573d8cb427d60b9419b6e0b466bd480b5dd444f Mon Sep 17 00:00:00 2001 From: Bruno Gomes <23224552+Lodek@users.noreply.github.com> Date: Wed, 19 Jun 2024 14:38:29 -0300 Subject: [PATCH 55/78] refactor: Change local_acp implementation to use acp_core (#2691) ## Relevant issue(s) Also the related issue won't close upon merge, nor is it picked up by github at the moment, because you just have #2694 in the PR description. So please don't forget to add the resolving key word (close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved) with it. So you can put for example: Resolve #2694 ## Description This PR changes the underlying implementation of the `ACPLocal` Reference Monitor from SourceHub to ACP Core. The changes aim to allow the WASM build which was broken since the inclusion of SourceHub as a dependency. Notable changes from this refactor include: - Replace SourceHub as a dependency for acp_core for ACP Local - The field `name` in a `Policy` is now correctly marked as required. This was a consequence of an issue on SourceHub side which allowed unnamed policies to be included. Therefore, in order to correct the tests, a name was given to all policies in the codebase. - The previous implementation of ACP Local made use of a special notion of account which had no sequence numbers. As such, re-submitting a Policy generated the same ID. 
This behavior isn't compatible with how SourceHub deals with IDs and the new implementation using ACP Core reflects that. That is, every created Policy, independent of its payload, will have unique IDs. - Updated the `Identity` struct to generate a DID instead of a SourceHub address ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [ ] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? Test suite, i.e. `make test` Specify the platform(s) on which this was tested: - Arch Linux --- acp/acp_local.go | 201 ++++-- acp/acp_local_test.go | 114 +++- acp/errors.go | 14 + acp/identity/errors.go | 30 + acp/identity/identity.go | 53 +- acp/identity/identity_test.go | 44 ++ acp/source_hub_client.go | 50 +- acp/types.go | 54 ++ cli/utils.go | 7 +- go.mod | 89 +-- go.sum | 608 +----------------- http/auth.go | 2 +- http/auth_test.go | 26 +- internal/db/db.go | 2 +- internal/db/permission/check.go | 2 +- internal/db/permission/register.go | 2 +- net/peer_test.go | 18 +- .../integration/acp/add_policy/basic_test.go | 6 +- .../acp/add_policy/with_empty_args_test.go | 1 + .../with_extra_perms_and_relations_test.go | 3 +- .../acp/add_policy/with_extra_perms_test.go | 4 +- .../add_policy/with_extra_relations_test.go | 4 +- .../with_invalid_creator_arg_test.go | 10 +- .../add_policy/with_invalid_relations_test.go | 2 + .../with_invalid_required_relation_test.go | 2 + .../add_policy/with_invalid_resource_test.go | 1 + .../add_policy/with_managed_relation_test.go | 3 +- .../add_policy/with_multi_policies_test.go | 31 +-
.../with_multiple_resources_test.go | 5 +- .../acp/add_policy/with_no_perms_test.go | 12 +- .../acp/add_policy/with_no_resources_test.go | 13 +- .../acp/add_policy/with_perm_expr_test.go | 6 +- .../add_policy/with_perm_invalid_expr_test.go | 3 + .../with_permissionless_owner_test.go | 9 +- .../add_policy/with_unused_relations_test.go | 3 +- tests/integration/acp/fixture.go | 6 +- tests/integration/acp/index/create_test.go | 8 +- tests/integration/acp/index/fixture.go | 6 +- tests/integration/acp/index/query_test.go | 12 +- .../acp/index/query_with_relation_test.go | 6 +- tests/integration/acp/p2p/replicator_test.go | 5 +- tests/integration/acp/p2p/subscribe_test.go | 5 +- tests/integration/acp/query/fixture.go | 7 +- .../acp/register_and_delete_test.go | 25 +- .../integration/acp/register_and_read_test.go | 25 +- .../acp/register_and_update_test.go | 35 +- .../add_dpi/accept_basic_dpi_fmts_test.go | 6 +- .../accept_extra_permissions_on_dpi_test.go | 9 +- .../accept_managed_relation_on_dpi_test.go | 3 +- ...ept_mixed_resources_on_partial_dpi_test.go | 3 +- .../schema/add_dpi/accept_multi_dpis_test.go | 5 +- .../accept_multi_resources_on_dpi_test.go | 6 +- ...cept_same_resource_on_diff_schemas_test.go | 3 +- .../reject_empty_arg_on_schema_test.go | 6 +- .../reject_invalid_arg_type_on_schema_test.go | 6 +- ...ect_invalid_owner_read_perm_on_dpi_test.go | 15 +- ...alid_owner_read_perm_symbol_on_dpi_test.go | 9 +- ...ct_invalid_owner_write_perm_on_dpi_test.go | 15 +- ...lid_owner_write_perm_symbol_on_dpi_test.go | 9 +- .../schema/add_dpi/reject_missing_dpi_test.go | 7 +- .../reject_missing_id_arg_on_schema_test.go | 6 +- .../reject_missing_perms_on_dpi_test.go | 3 +- ...ect_missing_resource_arg_on_schema_test.go | 6 +- .../reject_missing_resource_on_dpi_test.go | 3 +- ...ect_mixed_resources_on_partial_dpi_test.go | 3 +- .../updates/remove/policy_test.go | 5 +- 66 files changed, 773 insertions(+), 929 deletions(-) create mode 100644 acp/identity/errors.go create mode 
100644 acp/identity/identity_test.go create mode 100644 acp/types.go diff --git a/acp/acp_local.go b/acp/acp_local.go index 51e071604a..b62c4a454c 100644 --- a/acp/acp_local.go +++ b/acp/acp_local.go @@ -12,21 +12,60 @@ package acp import ( "context" + "errors" protoTypes "github.com/cosmos/gogoproto/types" + "github.com/sourcenetwork/acp_core/pkg/auth" + "github.com/sourcenetwork/acp_core/pkg/engine" + "github.com/sourcenetwork/acp_core/pkg/runtime" + "github.com/sourcenetwork/acp_core/pkg/types" "github.com/sourcenetwork/immutable" - "github.com/sourcenetwork/sourcehub/x/acp/embedded" - "github.com/sourcenetwork/sourcehub/x/acp/types" ) +const localACPStoreName = "local_acp" + // ACPLocal represents a local acp implementation that makes no remote calls. type ACPLocal struct { pathToStore immutable.Option[string] - localACP *embedded.LocalACP + engine types.ACPEngineServer + manager runtime.RuntimeManager } var _ sourceHubClient = (*ACPLocal)(nil) +func mapACPCorePolicy(pol *types.Policy) policy { + resources := make(map[string]*resource) + for _, coreResource := range pol.Resources { + resource := mapACPCoreResource(coreResource) + resources[resource.Name] = resource + } + + return policy{ + ID: pol.Id, + Resources: resources, + } +} + +func mapACPCoreResource(policy *types.Resource) *resource { + perms := make(map[string]*permission) + for _, corePermission := range policy.Permissions { + perm := mapACPCorePermission(corePermission) + perms[perm.Name] = perm + } + + return &resource{ + Name: policy.Name, + Permissions: perms, + } +} + +func mapACPCorePermission(perm *types.Permission) *permission { + return &permission{ + Name: perm.Name, + Expression: perm.Expression, + } +} + func (l *ACPLocal) Init(ctx context.Context, path string) { if path == "" { l.pathToStore = immutable.None[string]() @@ -36,73 +75,79 @@ func (l *ACPLocal) Init(ctx context.Context, path string) { } func (l *ACPLocal) Start(ctx context.Context) error { - var localACP embedded.LocalACP + 
var manager runtime.RuntimeManager var err error + var opts []runtime.Opt + var storeLocation string if !l.pathToStore.HasValue() { // Use a non-persistent, i.e. in memory store. - localACP, err = embedded.NewLocalACP( - embedded.WithInMemStore(), - ) - - if err != nil { - return NewErrInitializationOfACPFailed(err, "Local", "in-memory") - } + storeLocation = "in-memory" + opts = append(opts, runtime.WithMemKV()) } else { // Use peristent storage. - acpStorePath := l.pathToStore.Value() + "/" + embedded.DefaultDataDir - localACP, err = embedded.NewLocalACP( - embedded.WithPersistentStorage(acpStorePath), - ) - if err != nil { - return NewErrInitializationOfACPFailed(err, "Local", l.pathToStore.Value()) - } + storeLocation = l.pathToStore.Value() + acpStorePath := storeLocation + "/" + localACPStoreName + opts = append(opts, runtime.WithPersistentKV(acpStorePath)) } - l.localACP = &localACP + manager, err = runtime.NewRuntimeManager(opts...) + if err != nil { + return NewErrInitializationOfACPFailed(err, "Local", storeLocation) + } + + l.manager = manager + l.engine = engine.NewACPEngine(manager) return nil } func (l *ACPLocal) Close() error { - return l.localACP.Close() + return l.manager.Terminate() } func (l *ACPLocal) AddPolicy( ctx context.Context, creatorID string, policy string, - policyMarshalType types.PolicyMarshalingType, + marshalType policyMarshalType, creationTime *protoTypes.Timestamp, ) (string, error) { - createPolicy := types.MsgCreatePolicy{ - Creator: creatorID, + principal, err := auth.NewDIDPrincipal(creatorID) + if err != nil { + return "", newErrInvalidActorID(err, creatorID) + } + ctx = auth.InjectPrincipal(ctx, principal) + + createPolicy := types.CreatePolicyRequest{ Policy: policy, - MarshalType: policyMarshalType, + MarshalType: types.PolicyMarshalingType(marshalType), CreationTime: protoTypes.TimestampNow(), } - createPolicyResponse, err := l.localACP.GetMsgService().CreatePolicy( - l.localACP.GetCtx(), - &createPolicy, - ) + response, 
err := l.engine.CreatePolicy(ctx, &createPolicy) if err != nil { return "", err } - return createPolicyResponse.Policy.Id, nil + return response.Policy.Id, nil } func (l *ACPLocal) Policy( ctx context.Context, policyID string, -) (*types.Policy, error) { - queryPolicyResponse, err := l.localACP.GetQueryService().Policy( - l.localACP.GetCtx(), - &types.QueryPolicyRequest{Id: policyID}, - ) +) (immutable.Option[policy], error) { + none := immutable.None[policy]() + + request := types.GetPolicyRequest{Id: policyID} + response, err := l.engine.GetPolicy(ctx, &request) + if err != nil { - return nil, err + if errors.Is(err, types.ErrPolicyNotFound) { + return none, nil + } + return none, err } - return queryPolicyResponse.Policy, nil + policy := mapACPCorePolicy(response.Policy) + return immutable.Some(policy), nil } func (l *ACPLocal) RegisterObject( @@ -112,21 +157,27 @@ func (l *ACPLocal) RegisterObject( resourceName string, objectID string, creationTime *protoTypes.Timestamp, -) (types.RegistrationResult, error) { - registerDocResponse, err := l.localACP.GetMsgService().RegisterObject( - l.localACP.GetCtx(), - &types.MsgRegisterObject{ - Creator: actorID, - PolicyId: policyID, - Object: types.NewObject(resourceName, objectID), - CreationTime: creationTime, - }, - ) +) (RegistrationResult, error) { + principal, err := auth.NewDIDPrincipal(actorID) if err != nil { - return types.RegistrationResult(0), err + return RegistrationResult_NoOp, newErrInvalidActorID(err, actorID) + } + + ctx = auth.InjectPrincipal(ctx, principal) + req := types.RegisterObjectRequest{ + PolicyId: policyID, + Object: types.NewObject(resourceName, objectID), + CreationTime: creationTime, } - return registerDocResponse.Result, nil + registerDocResponse, err := l.engine.RegisterObject(ctx, &req) + + if err != nil { + return RegistrationResult_NoOp, err + } + + result := RegistrationResult(registerDocResponse.Result) + return result, nil } func (l *ACPLocal) ObjectOwner( @@ -134,14 +185,23 @@ func 
(l *ACPLocal) ObjectOwner( policyID string, resourceName string, objectID string, -) (*types.QueryObjectOwnerResponse, error) { - return l.localACP.GetQueryService().ObjectOwner( - l.localACP.GetCtx(), - &types.QueryObjectOwnerRequest{ - PolicyId: policyID, - Object: types.NewObject(resourceName, objectID), - }, - ) +) (immutable.Option[string], error) { + none := immutable.None[string]() + + req := types.GetObjectRegistrationRequest{ + PolicyId: policyID, + Object: types.NewObject(resourceName, objectID), + } + result, err := l.engine.GetObjectRegistration(ctx, &req) + if err != nil { + return none, err + } + + if result.IsRegistered { + return immutable.Some(result.OwnerId), nil + } + + return none, nil } func (l *ACPLocal) VerifyAccessRequest( @@ -152,26 +212,25 @@ func (l *ACPLocal) VerifyAccessRequest( resourceName string, docID string, ) (bool, error) { - checkDocResponse, err := l.localACP.GetQueryService().VerifyAccessRequest( - l.localACP.GetCtx(), - &types.QueryVerifyAccessRequestRequest{ - PolicyId: policyID, - AccessRequest: &types.AccessRequest{ - Operations: []*types.Operation{ - { - Object: types.NewObject(resourceName, docID), - Permission: permission.String(), - }, - }, - Actor: &types.Actor{ - Id: actorID, + req := types.VerifyAccessRequestRequest{ + PolicyId: policyID, + AccessRequest: &types.AccessRequest{ + Operations: []*types.Operation{ + { + Object: types.NewObject(resourceName, docID), + Permission: permission.String(), }, }, + Actor: &types.Actor{ + Id: actorID, + }, }, - ) + } + resp, err := l.engine.VerifyAccessRequest(ctx, &req) + if err != nil { return false, err } - return checkDocResponse.Valid, nil + return resp.Valid, nil } diff --git a/acp/acp_local_test.go b/acp/acp_local_test.go index ed613a829c..ac024d73c6 100644 --- a/acp/acp_local_test.go +++ b/acp/acp_local_test.go @@ -17,11 +17,13 @@ import ( "github.com/stretchr/testify/require" ) -var identity1 = "cosmos1zzg43wdrhmmk89z3pmejwete2kkd4a3vn7w969" -var identity2 = 
"cosmos1x25hhksxhu86r45hqwk28dd70qzux3262hdrll" +var identity1 = "did:key:z7r8os2G88XXBNBTLj3kFR5rzUJ4VAesbX7PgsA68ak9B5RYcXF5EZEmjRzzinZndPSSwujXb4XKHG6vmKEFG6ZfsfcQn" +var identity2 = "did:key:z7r8ooUiNXK8TT8Xjg1EWStR2ZdfxbzVfvGWbA2FjmzcnmDxz71QkP1Er8PP3zyLZpBLVgaXbZPGJPS4ppXJDPRcqrx4F" +var invalidIdentity = "did:something" -var validPolicyID string = "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4" +var validPolicyID string = "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" var validPolicy string = ` +name: test description: a policy actor: @@ -72,7 +74,7 @@ func Test_LocalACP_PersistentMemory_StartAndClose_NoError(t *testing.T) { require.Nil(t, err) } -func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) { +func Test_LocalACP_InMemory_AddPolicy_CreatingSamePolicyAfterWipeReturnsSameID(t *testing.T) { ctx := context.Background() localACP := NewLocalACP() @@ -96,7 +98,7 @@ func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) { errClose := localACP.Close() require.Nil(t, errClose) - // Since nothing is persisted should allow adding same policy again. + // Since nothing is persisted should allow adding same policy again with same ID localACP.Init(ctx, "") errStart = localACP.Start(ctx) @@ -118,7 +120,7 @@ func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) { require.Nil(t, errClose) } -func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) { +func Test_LocalACP_PersistentMemory_AddPolicy_CreatingSamePolicyReturnsDifferentIDs(t *testing.T) { acpPath := t.TempDir() require.NotEqual(t, "", acpPath) @@ -150,14 +152,14 @@ func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) { errStart = localACP.Start(ctx) require.Nil(t, errStart) - // Should not allow us to create the same policy again as it exists already. 
- _, errAddPolicy = localACP.AddPolicy( + // Should generate a different ID for the new policy, even though the payload is the same + newPolicyID, errAddPolicy := localACP.AddPolicy( ctx, identity1, validPolicy, ) - require.Error(t, errAddPolicy) - require.ErrorIs(t, errAddPolicy, ErrFailedToAddPolicyWithACP) + require.NoError(t, errAddPolicy) + require.NotEqual(t, newPolicyID, policyID) errClose = localACP.Close() require.Nil(t, errClose) @@ -652,3 +654,95 @@ func Test_LocalACP_PersistentMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErr errClose = localACP.Close() require.Nil(t, errClose) } + +func Test_LocalACP_InMemory_AddPolicy_InvalidCreatorIDReturnsError(t *testing.T) { + ctx := context.Background() + localACP := NewLocalACP() + + localACP.Init(ctx, "") + err := localACP.Start(ctx) + require.Nil(t, err) + + policyID, err := localACP.AddPolicy( + ctx, + invalidIdentity, + validPolicy, + ) + + require.ErrorIs(t, err, ErrInvalidActorID) + require.Empty(t, policyID) + + err = localACP.Close() + require.NoError(t, err) +} + +func Test_LocalACP_InMemory_RegisterObject_InvalidCreatorIDReturnsError(t *testing.T) { + ctx := context.Background() + localACP := NewLocalACP() + + localACP.Init(ctx, "") + err := localACP.Start(ctx) + require.Nil(t, err) + + err = localACP.RegisterDocObject( + ctx, + invalidIdentity, + validPolicyID, + "users", + "documentID_XYZ", + ) + + require.ErrorIs(t, err, ErrInvalidActorID) + + err = localACP.Close() + require.NoError(t, err) +} + +func Test_LocalACP_Persistent_AddPolicy_InvalidCreatorIDReturnsError(t *testing.T) { + acpPath := t.TempDir() + require.NotEqual(t, "", acpPath) + + ctx := context.Background() + localACP := NewLocalACP() + + localACP.Init(ctx, acpPath) + err := localACP.Start(ctx) + require.Nil(t, err) + + policyID, err := localACP.AddPolicy( + ctx, + invalidIdentity, + validPolicy, + ) + + require.ErrorIs(t, err, ErrInvalidActorID) + require.Empty(t, policyID) + + err = localACP.Close() + require.NoError(t, err) +} + 
+func Test_LocalACP_Persistent_RegisterObject_InvalidCreatorIDReturnsError(t *testing.T) { + acpPath := t.TempDir() + require.NotEqual(t, "", acpPath) + + ctx := context.Background() + localACP := NewLocalACP() + + localACP.Init(ctx, acpPath) + err := localACP.Start(ctx) + require.Nil(t, err) + + err = localACP.RegisterDocObject( + ctx, + invalidIdentity, + validPolicyID, + "users", + "documentID_XYZ", + ) + + require.ErrorIs(t, err, ErrInvalidActorID) + + err = localACP.Close() + require.NoError(t, err) +} diff --git a/acp/errors.go b/acp/errors.go index 307b32f5ad..5ff4eee302 100644 --- a/acp/errors.go +++ b/acp/errors.go @@ -35,6 +35,8 @@ const ( errExprOfRequiredPermMustStartWithRelation = "expr of required permission must start with required relation" errExprOfRequiredPermHasInvalidChar = "expr of required permission has invalid character after relation" + + errInvalidActorID = "invalid actor ID" ) var ( @@ -53,6 +55,7 @@ var ( ErrNoPolicyArgs = errors.New(errNoPolicyArgs) ErrPolicyIDMustNotBeEmpty = errors.New(errPolicyIDMustNotBeEmpty) ErrResourceNameMustNotBeEmpty = errors.New(errResourceNameMustNotBeEmpty) + ErrInvalidActorID = errors.New(errInvalidActorID) ) func NewErrInitializationOfACPFailed( @@ -205,3 +208,14 @@ func newErrExprOfRequiredPermissionHasInvalidChar( errors.NewKV("Character", string(char)), ) } + +func newErrInvalidActorID( + inner error, + id string, +) error { + return errors.Wrap( + errInvalidActorID, + inner, + errors.NewKV("ActorID", id), + ) +} diff --git a/acp/identity/errors.go b/acp/identity/errors.go new file mode 100644 index 0000000000..54f272b780 --- /dev/null +++ b/acp/identity/errors.go @@ -0,0 +1,30 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package identity + +import ( + "encoding/hex" + + "github.com/sourcenetwork/defradb/errors" +) + +const errDIDCreation = "could not produce did for key" + +var ErrDIDCreation = errors.New(errDIDCreation) + +func newErrDIDCreation(inner error, keytype string, pubKey []byte) error { + return errors.Wrap( + errDIDCreation, + inner, + errors.NewKV("KeyType", keytype), + errors.NewKV("PubKey", hex.EncodeToString(pubKey)), + ) +} diff --git a/acp/identity/identity.go b/acp/identity/identity.go index 66fec280db..8d9a84c23b 100644 --- a/acp/identity/identity.go +++ b/acp/identity/identity.go @@ -11,12 +11,15 @@ package identity import ( - cosmosSecp256k1 "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" - "github.com/cosmos/cosmos-sdk/types" + "github.com/cyware/ssi-sdk/crypto" + "github.com/cyware/ssi-sdk/did/key" "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/sourcenetwork/immutable" ) +// didProducer generates a did:key from a public key +type didProducer = func(crypto.KeyType, []byte) (*key.DIDKey, error) + // None specifies an anonymous actor. var None = immutable.None[Identity]() @@ -26,33 +29,51 @@ type Identity struct { PublicKey *secp256k1.PublicKey // PrivateKey is the actor's private key. PrivateKey *secp256k1.PrivateKey - // Address is the actor's unique address. + // DID is the actor's unique identifier. // - // The address is derived from the actor's public key. - Address string + // The address is derived from the actor's public key, + // using the did:key method + DID string } // FromPrivateKey returns a new identity using the given private key. 
-func FromPrivateKey(privateKey *secp256k1.PrivateKey) immutable.Option[Identity] { +func FromPrivateKey(privateKey *secp256k1.PrivateKey) (immutable.Option[Identity], error) { pubKey := privateKey.PubKey() + did, err := DIDFromPublicKey(pubKey) + if err != nil { + return None, err + } + return immutable.Some(Identity{ - Address: AddressFromPublicKey(pubKey), + DID: did, PublicKey: pubKey, PrivateKey: privateKey, - }) + }), nil } // FromPublicKey returns a new identity using the given public key. -func FromPublicKey(publicKey *secp256k1.PublicKey) immutable.Option[Identity] { +func FromPublicKey(publicKey *secp256k1.PublicKey) (immutable.Option[Identity], error) { + did, err := DIDFromPublicKey(publicKey) + if err != nil { + return None, err + } return immutable.Some(Identity{ - Address: AddressFromPublicKey(publicKey), + DID: did, PublicKey: publicKey, - }) + }), nil +} + +// DIDFromPublicKey returns a did:key generated from the the given public key. +func DIDFromPublicKey(publicKey *secp256k1.PublicKey) (string, error) { + return didFromPublicKey(publicKey, key.CreateDIDKey) } -// AddressFromPublicKey returns the unique address of the given public key. 
-func AddressFromPublicKey(publicKey *secp256k1.PublicKey) string { - pub := cosmosSecp256k1.PubKey{Key: publicKey.SerializeCompressed()} - // conversion from well known types should never cause a panic - return types.MustBech32ifyAddressBytes("cosmos", pub.Address().Bytes()) +// didFromPublicKey produces a did from a secp256k1 key and a producer function +func didFromPublicKey(publicKey *secp256k1.PublicKey, producer didProducer) (string, error) { + bytes := publicKey.SerializeUncompressed() + did, err := producer(crypto.SECP256k1, bytes) + if err != nil { + return "", newErrDIDCreation(err, "secp256k1", bytes) + } + return did.String(), nil } diff --git a/acp/identity/identity_test.go b/acp/identity/identity_test.go new file mode 100644 index 0000000000..2f04c01337 --- /dev/null +++ b/acp/identity/identity_test.go @@ -0,0 +1,44 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package identity + +import ( + "fmt" + "testing" + + "github.com/cyware/ssi-sdk/crypto" + "github.com/cyware/ssi-sdk/did/key" + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/stretchr/testify/require" +) + +func Test_DIDFromPublicKey_ProducesDIDForPublicKey(t *testing.T) { + pubKey := &secp256k1.PublicKey{} + + did, err := DIDFromPublicKey(pubKey) + + want := "did:key:z7r8ooUiNXK8TT8Xjg1EWStR2ZdfxbzVfvGWbA2FjmzcnmDxz71QkP1Er8PP3zyLZpBLVgaXbZPGJPS4ppXJDPRcqrx4F" + require.Equal(t, want, did) + require.NoError(t, err) +} + +func Test_didFromPublicKey_ReturnsErrorWhenProducerFails(t *testing.T) { + mockedProducer := func(crypto.KeyType, []byte) (*key.DIDKey, error) { + return nil, fmt.Errorf("did generation err") + } + + pubKey := &secp256k1.PublicKey{} + + did, err := didFromPublicKey(pubKey, mockedProducer) + + require.Empty(t, did) + require.ErrorIs(t, err, ErrDIDCreation) +} diff --git a/acp/source_hub_client.go b/acp/source_hub_client.go index b41544d68f..22371cd6e2 100644 --- a/acp/source_hub_client.go +++ b/acp/source_hub_client.go @@ -15,10 +15,8 @@ import ( protoTypes "github.com/cosmos/gogoproto/types" "github.com/sourcenetwork/corelog" - "github.com/sourcenetwork/sourcehub/x/acp/types" + "github.com/sourcenetwork/immutable" "github.com/valyala/fastjson" - - "github.com/sourcenetwork/defradb/errors" ) // sourceHubClient is a private abstraction to allow multiple ACP implementations @@ -43,7 +41,7 @@ type sourceHubClient interface { ctx context.Context, creatorID string, policy string, - policyMarshalingType types.PolicyMarshalingType, + marshalType policyMarshalType, creationTime *protoTypes.Timestamp, ) (string, error) @@ -51,7 +49,7 @@ type sourceHubClient interface { Policy( ctx context.Context, policyID string, - ) (*types.Policy, error) + ) (immutable.Option[policy], error) // RegisterObject registers the object to have access control. // No error is returned upon successful registering of an object. 
@@ -62,7 +60,7 @@ type sourceHubClient interface { resourceName string, objectID string, creationTime *protoTypes.Timestamp, - ) (types.RegistrationResult, error) + ) (RegistrationResult, error) // ObjectOwner returns the owner of the object of the given objectID. ObjectOwner( @@ -70,7 +68,7 @@ type sourceHubClient interface { policyID string, resourceName string, objectID string, - ) (*types.QueryObjectOwnerResponse, error) + ) (immutable.Option[string], error) // VerifyAccessRequest returns true if the check was successfull and the request has access to the object. If // the check was successful but the request does not have access to the object, then returns false. @@ -120,17 +118,16 @@ func (a *sourceHubBridge) AddPolicy(ctx context.Context, creatorID string, polic return "", ErrPolicyDataMustNotBeEmpty } - // Assume policy is in YAML format by default. - policyMarshalType := types.PolicyMarshalingType_SHORT_YAML + marshalType := policyMarshalType_YAML if isJSON := fastjson.Validate(policy) == nil; isJSON { // Detect JSON format. - policyMarshalType = types.PolicyMarshalingType_SHORT_JSON + marshalType = policyMarshalType_JSON } policyID, err := a.client.AddPolicy( ctx, creatorID, policy, - policyMarshalType, + marshalType, protoTypes.TimestampNow(), ) @@ -160,19 +157,20 @@ func (a *sourceHubBridge) ValidateResourceExistsOnValidDPI( return ErrResourceNameMustNotBeEmpty } - policy, err := a.client.Policy(ctx, policyID) + maybePolicy, err := a.client.Policy(ctx, policyID) if err != nil { - if errors.Is(err, types.ErrPolicyNotFound) { - return newErrPolicyDoesNotExistWithACP(err, policyID) - } else { - return newErrPolicyValidationFailedWithACP(err, policyID) - } + return newErrPolicyValidationFailedWithACP(err, policyID) } + if !maybePolicy.HasValue() { + return newErrPolicyDoesNotExistWithACP(err, policyID) + } + + policy := maybePolicy.Value() // So far we validated that the policy exists, now lets validate that resource exists. 
- resourceResponse := policy.GetResourceByName(resourceName) - if resourceResponse == nil { + resourceResponse, ok := policy.Resources[resourceName] + if !ok { return newErrResourceDoesNotExistOnTargetPolicy(resourceName, policyID) } @@ -180,8 +178,8 @@ func (a *sourceHubBridge) ValidateResourceExistsOnValidDPI( // resource with the matching name, validate that all required permissions // for DPI actually exist on the target resource. for _, requiredPermission := range dpiRequiredPermissions { - permissionResponse := resourceResponse.GetPermissionByName(requiredPermission) - if permissionResponse == nil { + permissionResponse, ok := resourceResponse.Permissions[requiredPermission] + if !ok { return newErrResourceIsMissingRequiredPermission( resourceName, requiredPermission, @@ -227,10 +225,10 @@ func (a *sourceHubBridge) RegisterDocObject( } switch registerDocResult { - case types.RegistrationResult_NoOp: + case RegistrationResult_NoOp: return ErrObjectDidNotRegister - case types.RegistrationResult_Registered: + case RegistrationResult_Registered: log.InfoContext( ctx, "Document registered with local acp", @@ -241,7 +239,7 @@ func (a *sourceHubBridge) RegisterDocObject( ) return nil - case types.RegistrationResult_Unarchived: + case RegistrationResult_Unarchived: log.InfoContext( ctx, "Document re-registered (unarchived object) with local acp", @@ -262,7 +260,7 @@ func (a *sourceHubBridge) IsDocRegistered( resourceName string, docID string, ) (bool, error) { - queryObjectOwnerResponse, err := a.client.ObjectOwner( + maybeActor, err := a.client.ObjectOwner( ctx, policyID, resourceName, @@ -272,7 +270,7 @@ func (a *sourceHubBridge) IsDocRegistered( return false, NewErrFailedToCheckIfDocIsRegisteredWithACP(err, "Local", policyID, resourceName, docID) } - return queryObjectOwnerResponse.IsRegistered, nil + return maybeActor.HasValue(), nil } func (a *sourceHubBridge) CheckDocAccess( diff --git a/acp/types.go b/acp/types.go new file mode 100644 index 
0000000000..e17f9d9dc4
--- /dev/null
+++ b/acp/types.go
@@ -0,0 +1,54 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+// RegistrationResult is an enum type which indicates the result of a RegisterObject call to SourceHub / ACP Core
+type RegistrationResult int32
+
+const (
+	// NoOp indicates no action was taken. The operation failed or the Object already existed and was active
+	RegistrationResult_NoOp RegistrationResult = 0
+	// Registered indicates the Object was successfully registered to the Actor.
+	RegistrationResult_Registered RegistrationResult = 1
+	// Unarchived indicates that a previously deleted Object is active again.
+	// Only the original owners can Unarchive an object.
+	RegistrationResult_Unarchived RegistrationResult = 2
+)
+
+// policyMarshalType represents the format in which a policy
+// is marshaled
+type policyMarshalType int32
+
+const (
+	policyMarshalType_YAML policyMarshalType = 1
+	policyMarshalType_JSON policyMarshalType = 2
+)
+
+// policy is a data container carrying the necessary data
+// to verify whether a policy meets DPI requirements
+type policy struct {
+	ID        string
+	Resources map[string]*resource
+}
+
+// resource is a data container carrying the necessary data
+// to verify whether it meets DPI requirements.
+type resource struct {
+	Name        string
+	Permissions map[string]*permission
+}
+
+// permission is a data container carrying the necessary data
+// to verify whether it meets DPI requirements.
+type permission struct { + Name string + Expression string +} diff --git a/cli/utils.go b/cli/utils.go index afd941bc5c..ed98fb0f30 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -150,7 +150,12 @@ func setContextIdentity(cmd *cobra.Command, privateKeyHex string) error { return err } privKey := secp256k1.PrivKeyFromBytes(data) - ctx := db.SetContextIdentity(cmd.Context(), acpIdentity.FromPrivateKey(privKey)) + identity, err := acpIdentity.FromPrivateKey(privKey) + if err != nil { + return err + } + + ctx := db.SetContextIdentity(cmd.Context(), identity) cmd.SetContext(ctx) return nil } diff --git a/go.mod b/go.mod index b88e71219a..c684675448 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.21.3 require ( github.com/bits-and-blooms/bitset v1.13.0 github.com/bxcodec/faker v2.0.1+incompatible - github.com/cosmos/cosmos-sdk v0.50.7 github.com/cosmos/gogoproto v1.4.12 + github.com/cyware/ssi-sdk v0.0.0-20231229164914-f93f3006379f github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/evanphx/json-patch/v5 v5.9.0 github.com/fxamacker/cbor/v2 v2.6.0 @@ -37,12 +37,12 @@ require ( github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 + github.com/sourcenetwork/acp_core v0.0.0-20240607160510-47a5306b2ad2 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 github.com/sourcenetwork/corelog v0.0.8 github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.14 github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 - github.com/sourcenetwork/sourcehub v0.2.1-0.20240305165631-9b75b1000724 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 @@ -62,19 +62,10 @@ require ( require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.31.0-20230802163732-1c33ebd9ecfa.1 // indirect - cosmossdk.io/api v0.7.5 // indirect - cosmossdk.io/collections v0.4.0 // indirect - 
cosmossdk.io/core v0.11.0 // indirect - cosmossdk.io/depinject v1.0.0-alpha.4 // indirect cosmossdk.io/errors v1.0.1 // indirect cosmossdk.io/log v1.3.1 // indirect cosmossdk.io/math v1.3.0 // indirect cosmossdk.io/store v1.1.0 // indirect - cosmossdk.io/x/tx v0.13.3 // indirect - filippo.io/edwards25519 v1.0.0 // indirect - github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect - github.com/99designs/keyring v1.2.1 // indirect - github.com/DataDog/datadog-go v3.2.0+incompatible // indirect github.com/DataDog/zstd v1.5.5 // indirect github.com/Jorropo/jsync v1.0.1 // indirect github.com/NathanBaulch/protoc-gen-cobra v1.2.1 // indirect @@ -82,65 +73,46 @@ require ( github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect - github.com/btcsuite/btcd v0.22.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect - github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce // indirect + github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect github.com/bytecodealliance/wasmtime-go/v15 v15.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v1.1.0 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft v0.38.7 // indirect - github.com/cometbft/cometbft-db v0.9.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cosmos/btcutil v1.0.5 // 
indirect github.com/cosmos/cosmos-db v1.0.2 // indirect - github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect - github.com/cosmos/go-bip39 v1.0.0 // indirect - github.com/cosmos/gogogateway v1.2.0 // indirect - github.com/cosmos/gorocksdb v1.2.0 // indirect github.com/cosmos/iavl v1.1.2 // indirect github.com/cosmos/ics23/go v0.10.0 // indirect - github.com/cosmos/ledger-cosmos-go v0.13.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect - github.com/dgraph-io/badger/v2 v2.2007.4 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect - github.com/emicklei/dot v1.6.1 // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.3 // indirect - github.com/go-kit/kit v0.12.0 // indirect - github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/swag v0.22.8 // indirect + github.com/go-playground/locales v0.14.1 // indirect + 
github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.15.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -150,36 +122,25 @@ require ( github.com/google/flatbuffers v2.0.6+incompatible // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/orderedcode v0.0.1 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/handlers v1.5.2 // indirect - github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.1 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect - github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.5.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/hdevalence/ed25519consensus 
v0.1.0 // indirect - github.com/huandu/skiplist v1.2.0 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/hyperledger/aries-framework-go v0.3.2 // indirect github.com/hyperledger/aries-framework-go/component/kmscrypto v0.0.0-20230427134832-0c9969493bd3 // indirect github.com/hyperledger/aries-framework-go/component/log v0.0.0-20230427134832-0c9969493bd3 // indirect github.com/hyperledger/aries-framework-go/component/models v0.0.0-20230501135648-a9a7ad029347 // indirect github.com/hyperledger/aries-framework-go/spi v0.0.0-20230427134832-0c9969493bd3 // indirect - github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect @@ -192,7 +153,7 @@ require ( github.com/ipfs/kubo v0.25.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/jmhodges/levigo v1.0.0 // indirect + github.com/jorrizza/ed2curve25519 v0.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kilic/bls12-381 v0.1.1-0.20210503002446-7b7597926c69 // indirect github.com/klauspost/compress v1.17.7 // indirect @@ -200,12 +161,12 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/leodido/go-urn v1.2.4 // indirect github.com/lestrrat-go/blackmagic v1.0.2 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/httprc v1.0.5 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect github.com/lestrrat-go/option v1.0.1 // indirect - github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect @@ -227,13 +188,10 @@ require ( github.com/miekg/dns v1.1.58 // indirect github.com/mikioh/tcpinfo 
v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/highwayhash v1.0.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/mtibben/percent v0.2.1 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect @@ -241,7 +199,6 @@ require ( github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect - github.com/oklog/run v1.1.0 // indirect github.com/onsi/ginkgo/v2 v2.15.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ -262,39 +219,29 @@ require ( github.com/quic-go/quic-go v0.42.0 // indirect github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/rs/cors v1.10.1 // indirect github.com/rs/zerolog v1.32.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect github.com/segmentio/asm v1.2.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/sourcenetwork/raccoondb v0.2.0 // indirect - github.com/sourcenetwork/zanzi v0.3.0 // indirect + github.com/sourcenetwork/raccoondb 
v0.2.1-0.20240606193653-1e91e9be9234 // indirect + github.com/sourcenetwork/zanzi v0.3.1-0.20240606201400-df5f801d0bd4 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/tendermint/go-amino v0.16.0 // indirect - github.com/tendermint/tm-db v0.6.7 // indirect - github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8 // indirect github.com/tetratelabs/wazero v1.5.0 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect github.com/ugorji/go/codec v1.2.12 // indirect github.com/wasmerio/wasmer-go v1.0.4 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/zondax/hid v0.9.2 // indirect - github.com/zondax/ledger-go v0.14.3 // indirect - go.etcd.io/bbolt v1.3.8 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel v1.27.0 // indirect go.opentelemetry.io/otel/sdk v1.27.0 // indirect @@ -311,14 +258,10 @@ require ( golang.org/x/text v0.15.0 // indirect golang.org/x/tools v0.20.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect - google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240415141817-7cd4c1c1f9ec // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - gotest.tools/v3 v3.5.1 // indirect lukechampine.com/blake3 v1.3.0 // indirect - nhooyr.io/websocket v1.8.7 // indirect - pgregory.net/rapid v1.1.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 851e498e63..fb05912276 100644 --- a/go.sum +++ b/go.sum @@ -4,14 +4,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cosmossdk.io/api v0.7.5 h1:eMPTReoNmGUm8DeiQL9DyM8sYDjEhWzL1+nLbI9DqtQ= -cosmossdk.io/api v0.7.5/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= -cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= -cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= -cosmossdk.io/core v0.11.0 h1:vtIafqUi+1ZNAE/oxLOQQ7Oek2n4S48SWLG8h/+wdbo= -cosmossdk.io/core v0.11.0/go.mod h1:LaTtayWBSoacF5xNzoF8tmLhehqlA9z1SWiPuNC6X1w= -cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= -cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= @@ -20,72 +12,32 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/store v1.1.0 h1:LnKwgYMc9BInn9PhpTFEQVbL9UK475G2H911CGGnWHk= cosmossdk.io/store v1.1.0/go.mod h1:oZfW/4Fc/zYqu3JmQcQdUJ3fqu5vnYTn3LZFFy8P8ng= -cosmossdk.io/x/tx v0.13.3 
h1:Ha4mNaHmxBc6RMun9aKuqul8yHiL78EKJQ8g23Zf73g= -cosmossdk.io/x/tx v0.13.3/go.mod h1:I8xaHv0rhUdIvIdptKIqzYy27+n2+zBVaxO6fscFhys= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= -filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= -github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= -github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/NathanBaulch/protoc-gen-cobra v1.2.1 h1:BOqX9glwicbqDJDGndMnhHhx8psGTSjGdZzRDY1a7A8= github.com/NathanBaulch/protoc-gen-cobra v1.2.1/go.mod h1:ZLPLEPQgV3jP3a7IEp+xxYPk8tF4lhY9ViV0hn6K3iA= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/adlio/schema v1.3.3 
h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= -github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= -github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/awalterschulze/gographviz v2.0.3+incompatible h1:9sVEXJBJLwGX7EQVhLm2elIKCm7P2YHFC8v6096G09E= github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -94,43 +46,23 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd 
v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= -github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc 
v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= github.com/bytecodealliance/wasmtime-go/v15 v15.0.0 h1:4R2MpSPPbtSxqdsOTvsMn1pnwdEhzbDGMao6LUUSLv4= github.com/bytecodealliance/wasmtime-go/v15 v15.0.0/go.mod h1:m6vB/SsM+pnJkVHmO1wzHYUeYtciltTKuxuvkR8pYcY= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -139,24 +71,14 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= -github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= -github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= 
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= @@ -169,62 +91,36 @@ github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/cometbft/cometbft v0.38.7 h1:ULhIOJ9+LgSy6nLekhq9ae3juX3NnQUMMPyVdhZV6Hk= github.com/cometbft/cometbft v0.38.7/go.mod h1:HIyf811dFMI73IE0F7RrnY/Fr+d1+HuJAgtkEpQjCMY= -github.com/cometbft/cometbft-db v0.9.1 h1:MIhVX5ja5bXNHF8EYrThkG9F7r9kSfv8BX4LWaxWJ4M= -github.com/cometbft/cometbft-db v0.9.1/go.mod h1:iliyWaoV0mRwBJoizElCwwRA9Tf7jZJOURcRZF9m60U= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd 
v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= -github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs= github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= -github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= -github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.50.7 h1:LsBGKxifENR/DN4E1RZaitsyL93HU44x0p8EnMHp4V4= -github.com/cosmos/cosmos-sdk v0.50.7/go.mod h1:84xDDJEHttRT7NDGwBaUOLVOMN0JNE9x7NbsYIxXs1s= -github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= -github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= -github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= -github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= -github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= github.com/cosmos/gogoproto v1.4.12 
h1:vB6Lbe/rtnYGjQuFxkPiPYiCybqFT8QvLipDZP8JpFE= github.com/cosmos/gogoproto v1.4.12/go.mod h1:LnZob1bXRdUoqMMtwYlcR3wjiElmlC+FkjaZRv1/eLY= -github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= -github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= github.com/cosmos/iavl v1.1.2 h1:zL9FK7C4L/P4IF1Dm5fIwz0WXCnn7Bp1M2FxH0ayM7Y= github.com/cosmos/iavl v1.1.2/go.mod h1:jLeUvm6bGT1YutCaL2fIar/8vGUE8cPZvh/gXEWDaDM= github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= -github.com/cosmos/ledger-cosmos-go v0.13.3 h1:7ehuBGuyIytsXbd4MP43mLeoN2LTOEnk5nvue4rK+yM= -github.com/cosmos/ledger-cosmos-go v0.13.3/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5XV6svsnkk9vdJtLr8= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/cyware/ssi-sdk v0.0.0-20231229164914-f93f3006379f 
h1:72bD8UUtmnis7LACaaurCYx3UKVdMZ2vSEent7HNMg4= +github.com/cyware/ssi-sdk v0.0.0-20231229164914-f93f3006379f/go.mod h1:fXZNsGp0JHlOW4XyY3SQk1dy6D2I0HD+aiHY3Ku0el8= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= -github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -235,72 +131,37 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= -github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/badger/v3 v3.2011.1 h1:Hmyof0WMEF/QtutX5SQHzIMnJQxb/IrSzhjckV2SD6g= github.com/dgraph-io/badger/v3 v3.2011.1/go.mod h1:0rLLrQpKVQAL0or/lBLMQznhr6dWWX7h5AKnmnqx268= -github.com/dgraph-io/ristretto 
v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= -github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod 
h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI= github.com/emicklei/dot v1.6.1/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 
h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify 
v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -310,16 +171,13 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/getkin/kin-openapi v0.125.0 h1:jyQCyf2qXS1qvs2U00xQzkGCqYPhEhZDmSmVt65fXno= github.com/getkin/kin-openapi v0.125.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= -github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= @@ -328,22 +186,10 @@ github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vz github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.5.1 
h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= -github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= -github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -353,17 +199,14 @@ github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbX github.com/go-openapi/jsonpointer v0.20.2/go.mod 
h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/swag v0.22.8 h1:/9RjDSQ0vbFR+NyjGMkFTsA1IA0fmhKSThmfGZjicbw= github.com/go-openapi/swag v0.22.8/go.mod h1:6QT22icPLEqAM/z/TChgb4WAveCHF92+2gF0CNjHpPI= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ= -github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.15.1 
h1:BSe8uhN+xQ4r5guV/ywQI4gO59C2raYcGffYWZEjZzM= +github.com/go-playground/validator/v10 v10.15.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= @@ -371,54 +214,30 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4 github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= -github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= -github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= -github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod 
h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid/v5 v5.2.0 h1:qw1GMx6/y8vhVsx626ImfKMuS5CvJmhIKKtuyvfajMM= github.com/gofrs/uuid/v5 v5.2.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= -github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= -github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -431,12 +250,9 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v2.0.6+incompatible h1:XHFReMv7nFFusa+CEokzWbzaYocKXI6C7hdU5Kgh9Lw= @@ -449,29 +265,20 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= -github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= -github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -480,62 +287,28 @@ github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE0 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= 
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= -github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= -github.com/hashicorp/go-msgpack v0.5.3/go.mod 
h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= -github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= @@ 
-544,22 +317,9 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= -github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hsanjuan/ipfs-lite v1.8.1 h1:Rpd9bTXYgkmnt8M5QsZnWwtW6ebxAB7HlU/d0zE4BmA= github.com/hsanjuan/ipfs-lite v1.8.1/go.mod h1:oGCaHBi+I73UFjc6wPAQ75hr4FjJhoqy6YPZjtghDIc= -github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= -github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= -github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= -github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/hyperledger/aries-framework-go 
v0.3.2 h1:GsSUaSEW82cr5X8b3Qf90GAi37kmTKHqpPJLhar13X8= @@ -574,17 +334,11 @@ github.com/hyperledger/aries-framework-go/component/storageutil v0.0.0-202304271 github.com/hyperledger/aries-framework-go/component/storageutil v0.0.0-20230427134832-0c9969493bd3/go.mod h1:aSG2dWjYVzu2PVBtOqsYghaChA5+UUXnBbL+MfVceYQ= github.com/hyperledger/aries-framework-go/spi v0.0.0-20230427134832-0c9969493bd3 h1:ytWmOQZIYQfVJ4msFvrqlp6d+ZLhT43wS8rgE2m+J1A= github.com/hyperledger/aries-framework-go/spi v0.0.0-20230427134832-0c9969493bd3/go.mod h1:oryUyWb23l/a3tAP9KW+GBbfcfqp9tZD4y5hSkFrkqI= -github.com/hyperledger/ursa-wrapper-go v0.3.1 h1:Do+QrVNniY77YK2jTIcyWqj9rm/Yb5SScN0bqCjiibA= -github.com/hyperledger/ursa-wrapper-go v0.3.1/go.mod h1:nPSAuMasIzSVciQo22PedBk4Opph6bJ6ia3ms7BH/mk= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= -github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= @@ -646,45 +400,26 @@ github.com/jbenet/go-temp-err-catcher 
v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPw github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= -github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= -github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jorrizza/ed2curve25519 v0.1.0 h1:P58ZEiVKW4vknYuGyOXuskMm82rTJyGhgRGrMRcCE8E= +github.com/jorrizza/ed2curve25519 v0.1.0/go.mod h1:27VPNk2FnNqLQNvvVymiX41VE/nokPyn5HHP7gtfYlo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kilic/bls12-381 v0.1.1-0.20210503002446-7b7597926c69 h1:kMJlf8z8wUcpyI+FQJIdGjAhfTww1y0AbQEv86bpVQI= github.com/kilic/bls12-381 v0.1.1-0.20210503002446-7b7597926c69/go.mod h1:tlkavyke+Ac7h8R3gZIjI5LKBcvMlSWnXNMgT3vZXo8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= 
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -699,9 +434,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c h1:bG+mr4SqbYRU69L6CSvHDsKbRg5Q9vaN2T5g7qcrPdQ= github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c/go.mod h1:a4edl+KcOVk1Nj3EjG77htqg2/0Mmy3bSG0kl+FWVqQ= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= @@ -714,8 +448,6 @@ github.com/lestrrat-go/jwx/v2 v2.0.21 h1:jAPKupy4uHgrHFEdjVjNkUgoBKtVDgrQPB/h55F github.com/lestrrat-go/jwx/v2 v2.0.21/go.mod h1:09mLW8zto6bWL9GbwnqAli+ArLf+5M33QLQPDggkUWM= github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= 
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= -github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= @@ -752,41 +484,27 @@ github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCy github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= github.com/lmittmann/tint v1.0.4 h1:LeYihpJ9hyGvE0w+K2okPTGUdVLfng1+nDNVR4vWISc= github.com/lmittmann/tint v1.0.4/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= -github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions 
v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= @@ -797,38 +515,22 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= 
-github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mtibben/percent v0.2.1 
h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= -github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= @@ -854,31 +556,14 @@ github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod 
h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -887,7 +572,6 @@ github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -895,47 +579,23 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= -github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= -github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod 
h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 h1:jik8PHtAIsPlCRJjJzl4udgEf7hawInF9texMeO2jrU= github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/piprate/json-gold v0.5.0 h1:RmGh1PYboCFcchVFuh2pbSWAZy4XJaqTMU4KQYsApbM= @@ -945,48 +605,34 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= 
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= @@ -997,18 +643,10 @@ github.com/quic-go/webtransport-go v0.6.0 
h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFD github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= @@ -1016,17 +654,14 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 
h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1055,22 +690,19 @@ github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYED github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/sourcenetwork/acp_core v0.0.0-20240607160510-47a5306b2ad2 h1:2aengjyUwOeF/R6bY0Bvl8DHxd8aA5rqC72s777zCGw= +github.com/sourcenetwork/acp_core v0.0.0-20240607160510-47a5306b2ad2/go.mod h1:vCb2fA1jj1eIyneHT+noM5g10oEuE3UzxDgBzeZZH1Q= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 h1:TpQDDPfucDgCNH0NVqVUk6SSq6T6G8p9HIocmwZh9Tg= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276/go.mod h1:lxiZTDBw0vheFMqSwX2OvB6RTDI1+/UtVCSU4rpThFM= github.com/sourcenetwork/corelog v0.0.8 h1:jCo0mFBpWrfhUCGzzN3uUtPGyQv3jnITdPO1s2ME3RY= @@ 
-1081,36 +713,22 @@ github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd h1:lmp github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd/go.mod h1:rkahXkgRH/3vZErN1Bx+qt1+w+CV5fgaJyKKWgISe4U= github.com/sourcenetwork/immutable v0.3.0 h1:gHPtGvLrTBTK5YpDAhMU+u+S8v1F6iYmc3nbZLryMdc= github.com/sourcenetwork/immutable v0.3.0/go.mod h1:GD7ceuh/HD7z6cdIwzKK2ctzgZ1qqYFJpsFp+8qYnbI= -github.com/sourcenetwork/raccoondb v0.2.0 h1:lQ/r8IUm1IMaivXWhqndgpisLsI59c6M9jn6ujKYBzk= -github.com/sourcenetwork/raccoondb v0.2.0/go.mod h1:A5ElVAhdf9yDjmpLrA3DLqYib09Fnuzm3sFUbY5r9BE= -github.com/sourcenetwork/sourcehub v0.2.1-0.20240305165631-9b75b1000724 h1:Dr13Lb9bTmycQZbNHAP+7RUVcy9g6jxL5rz74ipVyrs= -github.com/sourcenetwork/sourcehub v0.2.1-0.20240305165631-9b75b1000724/go.mod h1:jhWsUtCgIE6vDKg9/uvu1rXAOcVTrALjBXf2kLQGrCk= -github.com/sourcenetwork/zanzi v0.3.0 h1:Y9uyrpsT569QjzAxNOwWDxeWOkcntm+26qDLR7nGuo4= -github.com/sourcenetwork/zanzi v0.3.0/go.mod h1:eLQ94tdz96vfwHIZXL5ZoHbV9YHQeMyFeTc5hFSGDRU= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/sourcenetwork/raccoondb v0.2.1-0.20240606193653-1e91e9be9234 h1:8dA9bVC1A0ChJygtsUfNsek3oR0GnwpLoYpmEo4t2mk= +github.com/sourcenetwork/raccoondb v0.2.1-0.20240606193653-1e91e9be9234/go.mod h1:JvZ+G3QTmv7zto3tUBxnV0+fPoev0DkObSwczTm9VJE= +github.com/sourcenetwork/zanzi v0.3.1-0.20240606201400-df5f801d0bd4 h1:lO0ZSZ75qdQPp+ZcHEDy+kyHQhnf6ZDB1V5TjQW422w= +github.com/sourcenetwork/zanzi v0.3.1-0.20240606201400-df5f801d0bd4/go.mod h1:QdM0NfYxQd10WAPUTVAA1X10y5IgZ/CXs3gpD5YxdCE= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= 
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -1127,6 +745,7 @@ 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -1135,12 +754,6 @@ github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSW github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= -github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= -github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu8= -github.com/tendermint/tm-db v0.6.7/go.mod h1:byQDzFkZV1syXr/ReXS808NxA2xvyuuVgXOJ/088L6I= -github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8 h1:RBkacARv7qY5laaXGlF4wFB/tk5rnthhPb8oIBGoagY= -github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8/go.mod h1:9PdLyPiZIiW3UopXyRnPYyjUXSpiQNHRLu8fOsR3o8M= github.com/tetratelabs/wazero v1.5.0 h1:Yz3fZHivfDiZFUXnWMPUoiW7s8tC1sjdBtlJn08qYa0= github.com/tetratelabs/wazero v1.5.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A= github.com/textileio/go-datastore-extensions v1.0.1 
h1:qIJGqJaigQ1wD4TdwS/hf73u0HChhXvvUSJuxBEKS+c= @@ -1151,15 +764,9 @@ github.com/textileio/go-log/v2 v2.1.3-gke-2 h1:YkMA5ua0Cf/X6CkbexInsoJ/HdaHQBlgi github.com/textileio/go-log/v2 v2.1.3-gke-2/go.mod h1:DwACkjFS3kjZZR/4Spx3aPfSsciyslwUe5bxV8CEU2w= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= @@ -1182,33 +789,12 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdz github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer 
v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zalando/go-keyring v0.2.5 h1:Bc2HHpjALryKD62ppdEzaFG6VxL6Bc+5v0LYpN8Lba8= github.com/zalando/go-keyring v0.2.5/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= -github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= -github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= -github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.8 
h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= -go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= @@ -1221,10 +807,6 @@ go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2N go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -1233,117 +815,80 @@ go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= 
go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto 
v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 h1:985EYyeCOxTpcgOTJpflJUwOeEz0CQOdPt73OzpE9F8= golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod 
v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1353,52 +898,30 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1406,49 +929,32 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1456,23 +962,17 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1485,7 +985,6 @@ gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1495,41 +994,20 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU= 
+google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415141817-7cd4c1c1f9ec h1:C3cpGJVV1aqtO+b3L4LV6wQsB7sYplbahYZxrjkZd3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415141817-7cd4c1c1f9ec/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0/go.mod 
h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1540,12 +1018,9 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= @@ -1553,24 +1028,17 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1582,6 +1050,7 @@ gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= @@ -1592,14 +1061,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= -pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/http/auth.go 
b/http/auth.go index 994e9a4220..f62f1e25af 100644 --- a/http/auth.go +++ b/http/auth.go @@ -93,7 +93,7 @@ func verifyAuthToken(data []byte, audience string) (immutable.Option[acpIdentity if err != nil { return immutable.None[acpIdentity.Identity](), err } - return acpIdentity.FromPublicKey(pubKey), nil + return acpIdentity.FromPublicKey(pubKey) } // AuthMiddleware authenticates an actor and sets their identity for all subsequent actions. diff --git a/http/auth_test.go b/http/auth_test.go index 0ea9705efd..8aa9293eaf 100644 --- a/http/auth_test.go +++ b/http/auth_test.go @@ -27,7 +27,8 @@ func TestBuildAuthToken(t *testing.T) { privKey, err := crypto.GenerateSecp256k1() require.NoError(t, err) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) token, err := buildAuthToken(identity.Value(), "abc123") require.NoError(t, err) @@ -43,7 +44,8 @@ func TestSignAuthTokenErrorsWithPublicIdentity(t *testing.T) { privKey, err := crypto.GenerateSecp256k1() require.NoError(t, err) - identity := acpIdentity.FromPublicKey(privKey.PubKey()) + identity, err := acpIdentity.FromPublicKey(privKey.PubKey()) + require.NoError(t, err) token, err := buildAuthToken(identity.Value(), "abc123") require.NoError(t, err) @@ -55,22 +57,25 @@ func TestVerifyAuthToken(t *testing.T) { privKey, err := crypto.GenerateSecp256k1() require.NoError(t, err) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) token, err := buildAndSignAuthToken(identity.Value(), "abc123") require.NoError(t, err) actual, err := verifyAuthToken(token, "abc123") require.NoError(t, err) - expected := acpIdentity.FromPublicKey(privKey.PubKey()) - assert.Equal(t, expected.Value().Address, actual.Value().Address) + expected, err := acpIdentity.FromPublicKey(privKey.PubKey()) + require.NoError(t, err) + assert.Equal(t, expected.Value().DID, actual.Value().DID) } func 
TestVerifyAuthTokenErrorsWithNonMatchingAudience(t *testing.T) { privKey, err := crypto.GenerateSecp256k1() require.NoError(t, err) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) token, err := buildAndSignAuthToken(identity.Value(), "valid") require.NoError(t, err) @@ -85,7 +90,8 @@ func TestVerifyAuthTokenErrorsWithWrongPublicKey(t *testing.T) { otherKey, err := crypto.GenerateSecp256k1() require.NoError(t, err) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) token, err := buildAuthToken(identity.Value(), "123abc") require.NoError(t, err) @@ -105,7 +111,8 @@ func TestVerifyAuthTokenErrorsWithExpired(t *testing.T) { privKey, err := crypto.GenerateSecp256k1() require.NoError(t, err) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) token, err := buildAuthToken(identity.Value(), "123abc") require.NoError(t, err) @@ -124,7 +131,8 @@ func TestVerifyAuthTokenErrorsWithNotBefore(t *testing.T) { privKey, err := crypto.GenerateSecp256k1() require.NoError(t, err) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) token, err := buildAuthToken(identity.Value(), "123abc") require.NoError(t, err) diff --git a/internal/db/db.go b/internal/db/db.go index a04dee5123..277f102ace 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -176,7 +176,7 @@ func (db *db) AddPolicy( policyID, err := db.acp.Value().AddPolicy( ctx, - identity.Value().Address, + identity.Value().DID, policy, ) if err != nil { diff --git a/internal/db/permission/check.go b/internal/db/permission/check.go index 4d5e9f5ed0..9d3d8a587b 100644 --- a/internal/db/permission/check.go +++ b/internal/db/permission/check.go @@ -78,7 +78,7 @@ func CheckAccessOfDocOnCollectionWithACP( hasAccess, 
err := acpSystem.CheckDocAccess( ctx, permission, - identity.Value().Address, + identity.Value().DID, policyID, resourceName, docID, diff --git a/internal/db/permission/register.go b/internal/db/permission/register.go index bbe9a2e713..0f6e0c8587 100644 --- a/internal/db/permission/register.go +++ b/internal/db/permission/register.go @@ -40,7 +40,7 @@ func RegisterDocOnCollectionWithACP( if policyID, resourceName, hasPolicy := isPermissioned(collection); hasPolicy && identity.HasValue() { return acpSystem.RegisterDocObject( ctx, - identity.Value().Address, + identity.Value().DID, policyID, resourceName, docID, diff --git a/net/peer_test.go b/net/peer_test.go index dca864a1e3..eff618dc01 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -358,6 +358,7 @@ func TestSetReplicatorWithACollectionSpecifiedThatHasPolicy_ReturnError(t *testi defer n.Close() policy := ` + name: test description: a policy actor: name: actor @@ -377,13 +378,14 @@ func TestSetReplicatorWithACollectionSpecifiedThatHasPolicy_ReturnError(t *testi privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") require.NoError(t, err) privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) ctx = db.SetContextIdentity(ctx, identity) policyResult, err := d.AddPolicy(ctx, policy) policyID := policyResult.PolicyID require.NoError(t, err) - require.Equal(t, "7bef56a54eae563eafdc48c57cf37075351498ebb5a200f59cf9b8c6f8149606", policyID) + require.Equal(t, "7b5ed30570e8d9206027ef6d5469879a6c1ea4595625c6ca33a19063a6ed6214", policyID) schema := fmt.Sprintf(` type User @policy(id: "%s", resource: "user") { @@ -415,6 +417,7 @@ func TestSetReplicatorWithSomeCollectionThatHasPolicyUsingAllCollectionsByDefaul defer n.Close() policy := ` + name: test description: a policy actor: name: actor @@ -434,13 +437,14 @@ func 
TestSetReplicatorWithSomeCollectionThatHasPolicyUsingAllCollectionsByDefaul privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") require.NoError(t, err) privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) ctx = db.SetContextIdentity(ctx, identity) policyResult, err := d.AddPolicy(ctx, policy) policyID := policyResult.PolicyID require.NoError(t, err) - require.Equal(t, "7bef56a54eae563eafdc48c57cf37075351498ebb5a200f59cf9b8c6f8149606", policyID) + require.Equal(t, "7b5ed30570e8d9206027ef6d5469879a6c1ea4595625c6ca33a19063a6ed6214", policyID) schema := fmt.Sprintf(` type User @policy(id: "%s", resource: "user") { @@ -780,6 +784,7 @@ func TestAddP2PCollectionsWithPermissionedCollection_Error(t *testing.T) { defer n.Close() policy := ` + name: test description: a policy actor: name: actor @@ -799,13 +804,14 @@ func TestAddP2PCollectionsWithPermissionedCollection_Error(t *testing.T) { privKeyBytes, err := hex.DecodeString("028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f") require.NoError(t, err) privKey := secp256k1.PrivKeyFromBytes(privKeyBytes) - identity := acpIdentity.FromPrivateKey(privKey) + identity, err := acpIdentity.FromPrivateKey(privKey) + require.NoError(t, err) ctx = db.SetContextIdentity(ctx, identity) policyResult, err := d.AddPolicy(ctx, policy) policyID := policyResult.PolicyID require.NoError(t, err) - require.Equal(t, "7bef56a54eae563eafdc48c57cf37075351498ebb5a200f59cf9b8c6f8149606", policyID) + require.Equal(t, "7b5ed30570e8d9206027ef6d5469879a6c1ea4595625c6ca33a19063a6ed6214", policyID) schema := fmt.Sprintf(` type User @policy(id: "%s", resource: "user") { diff --git a/tests/integration/acp/add_policy/basic_test.go b/tests/integration/acp/add_policy/basic_test.go index 9adc54996b..6e08bbd65b 100644 --- a/tests/integration/acp/add_policy/basic_test.go 
+++ b/tests/integration/acp/add_policy/basic_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_BasicYAML_ValidPolicyID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a basic policy that satisfies minimum DPI requirements actor: @@ -46,7 +47,7 @@ func TestACP_AddPolicy_BasicYAML_ValidPolicyID(t *testing.T) { `, - ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", + ExpectedPolicyID: "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262", }, }, } @@ -65,6 +66,7 @@ func TestACP_AddPolicy_BasicJSON_ValidPolicyID(t *testing.T) { Policy: ` { + "name": "test", "description": "a basic policy that satisfies minimum DPI requirements", "resources": { "users": { @@ -91,7 +93,7 @@ func TestACP_AddPolicy_BasicJSON_ValidPolicyID(t *testing.T) { } `, - ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", + ExpectedPolicyID: "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262", }, }, } diff --git a/tests/integration/acp/add_policy/with_empty_args_test.go b/tests/integration/acp/add_policy/with_empty_args_test.go index 6b07ef0f67..ce1af33754 100644 --- a/tests/integration/acp/add_policy/with_empty_args_test.go +++ b/tests/integration/acp/add_policy/with_empty_args_test.go @@ -48,6 +48,7 @@ func TestACP_AddPolicy_EmptyPolicyCreator_Error(t *testing.T) { Identity: immutable.None[acpIdentity.Identity](), Policy: ` + name: test description: a basic policy that satisfies minimum DPI requirements actor: diff --git a/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go b/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go index 3cdfbddab0..04b0204c37 100644 --- a/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go +++ b/tests/integration/acp/add_policy/with_extra_perms_and_relations_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_ExtraPermissionsAndExtraRelations_ValidPolicyID(t *testin Identity: 
actor1Identity, Policy: ` + name: test description: a policy actor: @@ -53,7 +54,7 @@ func TestACP_AddPolicy_ExtraPermissionsAndExtraRelations_ValidPolicyID(t *testin - actor `, - ExpectedPolicyID: "f29c97dca930c9e93f7ef9e2139c63939c573af96c95af5cb9392861a0111b13", + ExpectedPolicyID: "af2a2eaa2d6701262ea60665487c87e3d41ab727194e1ea18ec16348149a02cc", }, }, } diff --git a/tests/integration/acp/add_policy/with_extra_perms_test.go b/tests/integration/acp/add_policy/with_extra_perms_test.go index 9b39a3743b..7832802ebc 100644 --- a/tests/integration/acp/add_policy/with_extra_perms_test.go +++ b/tests/integration/acp/add_policy/with_extra_perms_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_ExtraPermissions_ValidPolicyID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy resources: @@ -47,7 +48,7 @@ func TestACP_AddPolicy_ExtraPermissions_ValidPolicyID(t *testing.T) { name: actor `, - ExpectedPolicyID: "af6795fa4fce1c2b4f9c1252c1cdd758708a45e4fc3097406c008d78c820ee80", + ExpectedPolicyID: "a756ba5a7fe25e3fc36a4f113016a49b718b09028c917199ae68f478addc048c", }, }, } @@ -65,6 +66,7 @@ func TestACP_AddPolicy_ExtraDuplicatePermissions_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy resources: diff --git a/tests/integration/acp/add_policy/with_extra_relations_test.go b/tests/integration/acp/add_policy/with_extra_relations_test.go index acfb848247..57d9e7ca2d 100644 --- a/tests/integration/acp/add_policy/with_extra_relations_test.go +++ b/tests/integration/acp/add_policy/with_extra_relations_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_ExtraRelations_ValidPolicyID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy actor: @@ -51,7 +52,7 @@ func TestACP_AddPolicy_ExtraRelations_ValidPolicyID(t *testing.T) { - actor `, - ExpectedPolicyID: "922636974ecbc3c335143e45917832f219dfe4a168a523e7314616b94e7f9ebc", + ExpectedPolicyID: 
"bdfe4d449b8a42b1daf247e37b5a62ee139ff5b79fa15a970d5d7ae32c08d1e5", }, }, } @@ -69,6 +70,7 @@ func TestACP_AddPolicy_ExtraDuplicateRelations_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy actor: diff --git a/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go b/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go index 68cad2b361..11a62528fc 100644 --- a/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go +++ b/tests/integration/acp/add_policy/with_invalid_creator_arg_test.go @@ -31,9 +31,10 @@ func TestACP_AddPolicy_InvalidCreatorIdentityWithValidPolicy_Error(t *testing.T) Actions: []any{ testUtils.AddPolicy{ - Identity: immutable.Some(acpIdentity.Identity{Address: "invalid"}), + Identity: immutable.Some(acpIdentity.Identity{DID: "invalid"}), Policy: ` + name: a policy description: a basic policy that satisfies minimum DPI requirements actor: @@ -54,13 +55,12 @@ func TestACP_AddPolicy_InvalidCreatorIdentityWithValidPolicy_Error(t *testing.T) `, - ExpectedError: "policy creator can not be empty", + ExpectedError: "invalid actor ID", }, }, } - //TODO-ACP: https://github.com/sourcenetwork/defradb/issues/2357 - testUtils.AssertPanic(t, func() { testUtils.ExecuteTestCase(t, test) }) + testUtils.ExecuteTestCase(t, test) } func TestACP_AddPolicy_InvalidCreatorIdentityWithEmptyPolicy_Error(t *testing.T) { @@ -75,7 +75,7 @@ func TestACP_AddPolicy_InvalidCreatorIdentityWithEmptyPolicy_Error(t *testing.T) Actions: []any{ testUtils.AddPolicy{ - Identity: immutable.Some(acpIdentity.Identity{Address: "invalid"}), + Identity: immutable.Some(acpIdentity.Identity{DID: "invalid"}), Policy: "", diff --git a/tests/integration/acp/add_policy/with_invalid_relations_test.go b/tests/integration/acp/add_policy/with_invalid_relations_test.go index 37945509a5..2ccef61b5b 100644 --- a/tests/integration/acp/add_policy/with_invalid_relations_test.go +++ 
b/tests/integration/acp/add_policy/with_invalid_relations_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_NoRelations_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy actor: @@ -60,6 +61,7 @@ func TestACP_AddPolicy_NoRelationsLabel_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy actor: diff --git a/tests/integration/acp/add_policy/with_invalid_required_relation_test.go b/tests/integration/acp/add_policy/with_invalid_required_relation_test.go index d8982703cc..09b2b20a15 100644 --- a/tests/integration/acp/add_policy/with_invalid_required_relation_test.go +++ b/tests/integration/acp/add_policy/with_invalid_required_relation_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_MissingRequiredOwnerRelation_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy actor: @@ -63,6 +64,7 @@ func TestACP_AddPolicy_DuplicateOwnerRelation_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy resources: diff --git a/tests/integration/acp/add_policy/with_invalid_resource_test.go b/tests/integration/acp/add_policy/with_invalid_resource_test.go index 2fc311102d..4eb25c8cb1 100644 --- a/tests/integration/acp/add_policy/with_invalid_resource_test.go +++ b/tests/integration/acp/add_policy/with_invalid_resource_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_OneResourceThatIsEmpty_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: a policy description: a policy actor: diff --git a/tests/integration/acp/add_policy/with_managed_relation_test.go b/tests/integration/acp/add_policy/with_managed_relation_test.go index c3bff2c8fd..c925d6aa82 100644 --- a/tests/integration/acp/add_policy/with_managed_relation_test.go +++ b/tests/integration/acp/add_policy/with_managed_relation_test.go @@ -25,6 +25,7 @@ func TestACP_AddPolicy_WithRelationManagingOtherRelation_ValidPolicyID(t *testin Identity: 
actor1Identity, Policy: ` + name: a policy description: a policy with admin relation managing reader relation actor: @@ -52,7 +53,7 @@ func TestACP_AddPolicy_WithRelationManagingOtherRelation_ValidPolicyID(t *testin - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "725caeee6c5043f019e7dc3bb6a627520c235f063e862696c67ac89b5c1fc840", }, }, } diff --git a/tests/integration/acp/add_policy/with_multi_policies_test.go b/tests/integration/acp/add_policy/with_multi_policies_test.go index 008f7969e9..86f8ba963b 100644 --- a/tests/integration/acp/add_policy/with_multi_policies_test.go +++ b/tests/integration/acp/add_policy/with_multi_policies_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPolicies_ValidPolicyIDs(t *testing.T) Identity: actor1Identity, Policy: ` + name: a policy description: a policy actor: @@ -46,13 +47,14 @@ func TestACP_AddPolicy_AddMultipleDifferentPolicies_ValidPolicyIDs(t *testing.T) `, - ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", + ExpectedPolicyID: "2eb8b503c9fc0b7c1f7b04d68a6faa0f82a299db0ae02fed68f9897612439cb6", }, testUtils.AddPolicy{ Identity: actor1Identity, Policy: ` + name: a policy description: another policy actor: @@ -80,7 +82,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPolicies_ValidPolicyIDs(t *testing.T) - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "6b766a9aafabf0bf65102f73b7cd81963b65e1fd87ce763f386cc685147325ee", }, }, } @@ -99,6 +101,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPoliciesInDifferentFmts_ValidPolicyID Policy: ` { + "name": "test", "description": "a policy", "actor": { "name": "actor" @@ -125,13 +128,14 @@ func TestACP_AddPolicy_AddMultipleDifferentPoliciesInDifferentFmts_ValidPolicyID } `, - ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", + ExpectedPolicyID: 
"66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262", }, testUtils.AddPolicy{ Identity: actor1Identity, Policy: ` + name: test2 description: another policy actor: @@ -159,7 +163,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPoliciesInDifferentFmts_ValidPolicyID - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "757c772e9c4418de530ecd72cbc56dfc4e0c22aa2f3b2d219afa7663b2f0af00", }, }, } @@ -169,6 +173,7 @@ func TestACP_AddPolicy_AddMultipleDifferentPoliciesInDifferentFmts_ValidPolicyID func TestACP_AddPolicy_AddDuplicatePolicyByOtherCreator_ValidPolicyIDs(t *testing.T) { const policyUsedByBoth string = ` + name: test description: a policy actor: @@ -198,7 +203,7 @@ func TestACP_AddPolicy_AddDuplicatePolicyByOtherCreator_ValidPolicyIDs(t *testin Policy: policyUsedByBoth, - ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", + ExpectedPolicyID: "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262", }, testUtils.AddPolicy{ @@ -206,7 +211,7 @@ func TestACP_AddPolicy_AddDuplicatePolicyByOtherCreator_ValidPolicyIDs(t *testin Policy: policyUsedByBoth, - ExpectedPolicyID: "5cff96a89799f7974906138fb794f670d35ac5df9985621da44f9f3529af1c0b", + ExpectedPolicyID: "ec02815cb630850678bda5e2d75cfacebc96f5610e32a602f7bfc414e21474ad", }, }, } @@ -224,6 +229,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePolicies_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -244,13 +250,14 @@ func TestACP_AddPolicy_AddMultipleDuplicatePolicies_Error(t *testing.T) { `, - ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", + ExpectedPolicyID: "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262", }, testUtils.AddPolicy{ Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -271,7 +278,7 @@ func 
TestACP_AddPolicy_AddMultipleDuplicatePolicies_Error(t *testing.T) { `, - ExpectedError: "policy aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3: policy exists", + ExpectedPolicyID: "ec02815cb630850678bda5e2d75cfacebc96f5610e32a602f7bfc414e21474ad", }, }, } @@ -279,7 +286,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePolicies_Error(t *testing.T) { testUtils.ExecuteTestCase(t, test) } -func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_Error(t *testing.T) { +func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_ProducesDifferentIDs(t *testing.T) { test := testUtils.TestCase{ Description: "Test acp, add duplicate policies different formats, error", @@ -289,6 +296,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_Error(t *testin Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -308,7 +316,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_Error(t *testin - actor `, - ExpectedPolicyID: "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3", + ExpectedPolicyID: "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262", }, testUtils.AddPolicy{ @@ -316,6 +324,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_Error(t *testin Policy: ` { + "name": "test", "description": "a policy", "actor": { "name": "actor" @@ -342,7 +351,7 @@ func TestACP_AddPolicy_AddMultipleDuplicatePoliciesDifferentFmts_Error(t *testin } `, - ExpectedError: "policy aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3: policy exists", + ExpectedPolicyID: "ec02815cb630850678bda5e2d75cfacebc96f5610e32a602f7bfc414e21474ad", }, }, } diff --git a/tests/integration/acp/add_policy/with_multiple_resources_test.go b/tests/integration/acp/add_policy/with_multiple_resources_test.go index c7ae18f7ed..dcabf691dd 100644 --- a/tests/integration/acp/add_policy/with_multiple_resources_test.go +++ 
b/tests/integration/acp/add_policy/with_multiple_resources_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_MultipleResources_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -62,7 +63,7 @@ func TestACP_AddPolicy_MultipleResources_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "390239e42550ea5945b9185576b79694f7000a7ce3b301d60afe35572c958cd7", + ExpectedPolicyID: "a9e1a113ccc2609d7f99a42531017f0fbc9b736640ec8ffc7f09a1e29583ca45", }, }, } @@ -80,6 +81,7 @@ func TestACP_AddPolicy_MultipleResourcesUsingRelationDefinedInOther_Error(t *tes Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -131,6 +133,7 @@ func TestACP_AddPolicy_SecondResourcesMissingRequiredOwner_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: diff --git a/tests/integration/acp/add_policy/with_no_perms_test.go b/tests/integration/acp/add_policy/with_no_perms_test.go index b3b7faa307..0fc3d91562 100644 --- a/tests/integration/acp/add_policy/with_no_perms_test.go +++ b/tests/integration/acp/add_policy/with_no_perms_test.go @@ -34,6 +34,7 @@ func TestACP_AddPolicy_NoPermissionsOnlyOwner_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -50,7 +51,7 @@ func TestACP_AddPolicy_NoPermissionsOnlyOwner_ValidID(t *testing.T) { `, - ExpectedPolicyID: "39b436f0c28e7ce5ed7e1c592bb578590d62ccfacef0df565ac97520c880c017", + ExpectedPolicyID: "db6e4d3efc58f8a7bfe8f35c73c39630f1ad3e6fad7ffeb22563d1284bd176dc", }, }, } @@ -68,6 +69,7 @@ func TestACP_AddPolicy_NoPermissionsMultiRelations_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -87,7 +89,7 @@ func TestACP_AddPolicy_NoPermissionsMultiRelations_ValidID(t *testing.T) { `, - ExpectedPolicyID: "07da6260811df769d551e89e02364b3e939cb585696c1a69b626bb8ecdd378f9", + ExpectedPolicyID: 
"106a38bfb702608e26feda961d9fffd74141ef34eccc17b3de2c15dd7620da46", }, }, } @@ -105,6 +107,7 @@ func TestACP_AddPolicy_NoPermissionsLabelOnlyOwner_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -119,7 +122,7 @@ func TestACP_AddPolicy_NoPermissionsLabelOnlyOwner_ValidID(t *testing.T) { `, - ExpectedPolicyID: "39b436f0c28e7ce5ed7e1c592bb578590d62ccfacef0df565ac97520c880c017", + ExpectedPolicyID: "db6e4d3efc58f8a7bfe8f35c73c39630f1ad3e6fad7ffeb22563d1284bd176dc", }, }, } @@ -137,6 +140,7 @@ func TestACP_AddPolicy_NoPermissionsLabelMultiRelations_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -154,7 +158,7 @@ func TestACP_AddPolicy_NoPermissionsLabelMultiRelations_ValidID(t *testing.T) { `, - ExpectedPolicyID: "07da6260811df769d551e89e02364b3e939cb585696c1a69b626bb8ecdd378f9", + ExpectedPolicyID: "106a38bfb702608e26feda961d9fffd74141ef34eccc17b3de2c15dd7620da46", }, }, } diff --git a/tests/integration/acp/add_policy/with_no_resources_test.go b/tests/integration/acp/add_policy/with_no_resources_test.go index 8bd6e5268d..b297ce8b3b 100644 --- a/tests/integration/acp/add_policy/with_no_resources_test.go +++ b/tests/integration/acp/add_policy/with_no_resources_test.go @@ -28,6 +28,7 @@ func TestACP_AddPolicy_NoResource_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -36,7 +37,7 @@ func TestACP_AddPolicy_NoResource_ValidID(t *testing.T) { resources: `, - ExpectedPolicyID: "e16824022121b55f2b2babbd2ab82960a8837767197e20acf9c577cbb4539991", + ExpectedPolicyID: "e3ffe8e802e4612dc41d7a638cd77dc16d51eb1db0d18682eec75b05234e6ee2", }, }, } @@ -56,13 +57,14 @@ func TestACP_AddPolicy_NoResourceLabel_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: name: actor `, - ExpectedPolicyID: "e16824022121b55f2b2babbd2ab82960a8837767197e20acf9c577cbb4539991", + 
ExpectedPolicyID: "e3ffe8e802e4612dc41d7a638cd77dc16d51eb1db0d18682eec75b05234e6ee2", }, }, } @@ -70,9 +72,8 @@ func TestACP_AddPolicy_NoResourceLabel_ValidID(t *testing.T) { testUtils.ExecuteTestCase(t, test) } -// Eventhough empty resources make no sense from a DefraDB (DPI) perspective, -// it is still a valid sourcehub policy for now. -func TestACP_AddPolicy_PolicyWithOnlySpace_ValidID(t *testing.T) { +// A Policy can have no resources (incompatible with DPI) but it needs a name. +func TestACP_AddPolicy_PolicyWithOnlySpace_NameIsRequired(t *testing.T) { test := testUtils.TestCase{ Description: "Test acp, adding a policy that has only space", @@ -83,7 +84,7 @@ func TestACP_AddPolicy_PolicyWithOnlySpace_ValidID(t *testing.T) { Policy: " ", - ExpectedPolicyID: "e16824022121b55f2b2babbd2ab82960a8837767197e20acf9c577cbb4539991", + ExpectedError: "name is required", }, }, } diff --git a/tests/integration/acp/add_policy/with_perm_expr_test.go b/tests/integration/acp/add_policy/with_perm_expr_test.go index fd31e30840..eeaf2ae69c 100644 --- a/tests/integration/acp/add_policy/with_perm_expr_test.go +++ b/tests/integration/acp/add_policy/with_perm_expr_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithMinus_ValidID(t *testi Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -48,7 +49,7 @@ func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithMinus_ValidID(t *testi - actor `, - ExpectedPolicyID: "fcb989d8bad149e3c4b22f8a69969760187b29ea1c796a3f9d2e16e32f493590", + ExpectedPolicyID: "2b10641de73790b95452f496a37ad53a8d8a0703803f35f6961457af912947c0", }, }, } @@ -67,6 +68,7 @@ func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithMinusNoSpace_ValidID(t Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -89,7 +91,7 @@ func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithMinusNoSpace_ValidID(t - actor `, - ExpectedPolicyID: 
"50d8fbaf70a08c2c0e2bf0355a353a8bb06cc4d6e2f3ddbf71d91f9ef5aa49af", + ExpectedPolicyID: "b6b305214247a08903e01466a1bfd01516206458d2725506797300b285e63690", }, }, } diff --git a/tests/integration/acp/add_policy/with_perm_invalid_expr_test.go b/tests/integration/acp/add_policy/with_perm_invalid_expr_test.go index 7c5033d700..869c32c573 100644 --- a/tests/integration/acp/add_policy/with_perm_invalid_expr_test.go +++ b/tests/integration/acp/add_policy/with_perm_invalid_expr_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_EmptyExpressionInPermission_Error(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -66,6 +67,7 @@ func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithInocorrectSymbol_Error Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -106,6 +108,7 @@ func TestACP_AddPolicy_PermissionExprWithOwnerInTheEndWithInocorrectSymbolNoSpac Identity: actor1Identity, Policy: ` + name: test description: a policy actor: diff --git a/tests/integration/acp/add_policy/with_permissionless_owner_test.go b/tests/integration/acp/add_policy/with_permissionless_owner_test.go index c6ada1c121..5fd70f23c5 100644 --- a/tests/integration/acp/add_policy/with_permissionless_owner_test.go +++ b/tests/integration/acp/add_policy/with_permissionless_owner_test.go @@ -33,6 +33,7 @@ func TestACP_AddPolicy_PermissionlessOwnerWrite_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -55,7 +56,7 @@ func TestACP_AddPolicy_PermissionlessOwnerWrite_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "f7e7b84108ba67bcdeb211ff740eee13b2b6770106dcf0d0436a3a33d8a2f9f0", + ExpectedPolicyID: "9328e41c1969c6269bfd82162b45831ccec8df9fc8d57902620ad43baaa0d77d", }, }, } @@ -73,6 +74,7 @@ func TestACP_AddPolicy_PermissionlessOwnerRead_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -95,7 +97,7 @@ func 
TestACP_AddPolicy_PermissionlessOwnerRead_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "22c3eee3b3d216c01244a47a6aa241a08b767b3ef0a9edfbd30b3575a6bd94f4", + ExpectedPolicyID: "74f3c0996d5b1669b9efda5ef45f93a925df9f770e2dcd53f352b5f0693a2b0f", }, }, } @@ -113,6 +115,7 @@ func TestACP_AddPolicy_PermissionlessOwnerReadWrite_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -135,7 +138,7 @@ func TestACP_AddPolicy_PermissionlessOwnerReadWrite_ValidID(t *testing.T) { - actor `, - ExpectedPolicyID: "f7e7b84108ba67bcdeb211ff740eee13b2b6770106dcf0d0436a3a33d8a2f9f0", + ExpectedPolicyID: "9328e41c1969c6269bfd82162b45831ccec8df9fc8d57902620ad43baaa0d77d", }, }, } diff --git a/tests/integration/acp/add_policy/with_unused_relations_test.go b/tests/integration/acp/add_policy/with_unused_relations_test.go index 27149ede0c..241720e36e 100644 --- a/tests/integration/acp/add_policy/with_unused_relations_test.go +++ b/tests/integration/acp/add_policy/with_unused_relations_test.go @@ -26,6 +26,7 @@ func TestACP_AddPolicy_UnusedRelation_ValidID(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -49,7 +50,7 @@ func TestACP_AddPolicy_UnusedRelation_ValidID(t *testing.T) { `, - ExpectedPolicyID: "a6d42bfedff5db1feca0313793e4f9540851e3feaefffaebc98a1ee5bb140e45", + ExpectedPolicyID: "ab1102f656ed1f2b037a3c9586611c701243f2086fa64211bd7baff7271c2030", }, }, } diff --git a/tests/integration/acp/fixture.go b/tests/integration/acp/fixture.go index c2a0c8f74b..e1aed354b4 100644 --- a/tests/integration/acp/fixture.go +++ b/tests/integration/acp/fixture.go @@ -31,5 +31,9 @@ func MustParseIdentity(privateKeyHex string) immutable.Option[acpIdentity.Identi panic(err) } privateKey := secp256k1.PrivKeyFromBytes(privateKeyBytes) - return acpIdentity.FromPrivateKey(privateKey) + identity, err := acpIdentity.FromPrivateKey(privateKey) + if err != nil { + panic(err) + } + return identity } diff 
--git a/tests/integration/acp/index/create_test.go b/tests/integration/acp/index/create_test.go index 8d6fc4de3f..61364a0c70 100644 --- a/tests/integration/acp/index/create_test.go +++ b/tests/integration/acp/index/create_test.go @@ -25,13 +25,13 @@ func TestACP_IndexCreateWithSeparateRequest_OnCollectionWithPolicy_NoError(t *te testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -71,13 +71,13 @@ func TestACP_IndexCreateWithDirective_OnCollectionWithPolicy_NoError(t *testing. testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @index diff --git a/tests/integration/acp/index/fixture.go b/tests/integration/acp/index/fixture.go index 0f0af7ba1c..652fa4d90d 100644 --- a/tests/integration/acp/index/fixture.go +++ b/tests/integration/acp/index/fixture.go @@ -10,8 +10,9 @@ package test_acp_index -// policy id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd" +// policy id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454" const userPolicy = ` +name: test description: a test policy which marks a collection in a database as a resource actor: @@ -39,8 +40,9 @@ resources: - 
actor ` -// policy id: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a" +// policy id: "f6927e8861f91122a5e3e333249297e4315b672298b5cb93ee3f49facc1e0d11" const bookAuthorPolicy = ` +name: test description: a test policy which marks a collection in a database as a resource actor: diff --git a/tests/integration/acp/index/query_test.go b/tests/integration/acp/index/query_test.go index 6ce7fdc1a2..30f0e137f9 100644 --- a/tests/integration/acp/index/query_test.go +++ b/tests/integration/acp/index/query_test.go @@ -24,12 +24,12 @@ func TestACPWithIndex_UponQueryingPrivateDocWithoutIdentity_ShouldNotFetch(t *te testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @index @@ -78,12 +78,12 @@ func TestACPWithIndex_UponQueryingPrivateDocWithIdentity_ShouldFetch(t *testing. 
testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @index @@ -136,12 +136,12 @@ func TestACPWithIndex_UponQueryingPrivateDocWithWrongIdentity_ShouldNotFetch(t * testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: userPolicy, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @index diff --git a/tests/integration/acp/index/query_with_relation_test.go b/tests/integration/acp/index/query_with_relation_test.go index 19229b3f67..c68b742dad 100644 --- a/tests/integration/acp/index/query_with_relation_test.go +++ b/tests/integration/acp/index/query_with_relation_test.go @@ -22,12 +22,12 @@ func createAuthorBooksSchemaWithPolicyAndCreateDocs() []any { testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: bookAuthorPolicy, - ExpectedPolicyID: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a", + ExpectedPolicyID: "f6927e8861f91122a5e3e333249297e4315b672298b5cb93ee3f49facc1e0d11", }, testUtils.SchemaUpdate{ Schema: ` type Author @policy( - id: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a", + id: "f6927e8861f91122a5e3e333249297e4315b672298b5cb93ee3f49facc1e0d11", resource: "author" ) { name: String @@ -37,7 +37,7 @@ func 
createAuthorBooksSchemaWithPolicyAndCreateDocs() []any { } type Book @policy( - id: "e3c35f345c844e8c0144d793933ea7287af1930d36e9d7d98e8d930fb9815a4a", + id: "f6927e8861f91122a5e3e333249297e4315b672298b5cb93ee3f49facc1e0d11", resource: "author" ) { name: String diff --git a/tests/integration/acp/p2p/replicator_test.go b/tests/integration/acp/p2p/replicator_test.go index ace55f6a06..08aa075508 100644 --- a/tests/integration/acp/p2p/replicator_test.go +++ b/tests/integration/acp/p2p/replicator_test.go @@ -35,6 +35,7 @@ func TestACP_P2POneToOneReplicatorWithPermissionedCollection_Error(t *testing.T) Identity: acpUtils.Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -62,13 +63,13 @@ func TestACP_P2POneToOneReplicatorWithPermissionedCollection_Error(t *testing.T) - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String diff --git a/tests/integration/acp/p2p/subscribe_test.go b/tests/integration/acp/p2p/subscribe_test.go index c41039e380..523200e928 100644 --- a/tests/integration/acp/p2p/subscribe_test.go +++ b/tests/integration/acp/p2p/subscribe_test.go @@ -35,6 +35,7 @@ func TestACP_P2PSubscribeAddGetSingleWithPermissionedCollection_Error(t *testing Identity: acpUtils.Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -62,13 +63,13 @@ func TestACP_P2PSubscribeAddGetSingleWithPermissionedCollection_Error(t *testing - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: 
"94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String diff --git a/tests/integration/acp/query/fixture.go b/tests/integration/acp/query/fixture.go index be521e700f..2d36f9fe1b 100644 --- a/tests/integration/acp/query/fixture.go +++ b/tests/integration/acp/query/fixture.go @@ -16,6 +16,7 @@ import ( ) const employeeCompanyPolicy = ` +name: test description: A Valid DefraDB Policy Interface (DPI) actor: @@ -58,13 +59,13 @@ func getSetupEmployeeCompanyActions() []any { testUtils.AddPolicy{ Identity: acpUtils.Actor1Identity, Policy: employeeCompanyPolicy, - ExpectedPolicyID: "6f11799717723307077147736fddccd8a7b5e68d2ec22e2155f0186e0c43a2e2", + ExpectedPolicyID: "9d6c19007a894746c3f45f7fe45513a88a20ad77637948228869546197bb1b05", }, testUtils.SchemaUpdate{ Schema: ` type Employee @policy( - id: "6f11799717723307077147736fddccd8a7b5e68d2ec22e2155f0186e0c43a2e2", + id: "9d6c19007a894746c3f45f7fe45513a88a20ad77637948228869546197bb1b05", resource: "employees" ) { name: String @@ -73,7 +74,7 @@ func getSetupEmployeeCompanyActions() []any { } type Company @policy( - id: "6f11799717723307077147736fddccd8a7b5e68d2ec22e2155f0186e0c43a2e2", + id: "9d6c19007a894746c3f45f7fe45513a88a20ad77637948228869546197bb1b05", resource: "companies" ) { name: String diff --git a/tests/integration/acp/register_and_delete_test.go b/tests/integration/acp/register_and_delete_test.go index 1392c3719a..910729d28e 100644 --- a/tests/integration/acp/register_and_delete_test.go +++ b/tests/integration/acp/register_and_delete_test.go @@ -31,6 +31,7 @@ func TestACP_CreateWithoutIdentityAndDeleteWithoutIdentity_CanDelete(t *testing. 
Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -58,13 +59,13 @@ func TestACP_CreateWithoutIdentityAndDeleteWithoutIdentity_CanDelete(t *testing. - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -124,6 +125,7 @@ func TestACP_CreateWithoutIdentityAndDeleteWithIdentity_CanDelete(t *testing.T) Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -151,13 +153,13 @@ func TestACP_CreateWithoutIdentityAndDeleteWithIdentity_CanDelete(t *testing.T) - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -218,6 +220,7 @@ func TestACP_CreateWithIdentityAndDeleteWithIdentity_CanDelete(t *testing.T) { Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -245,13 +248,13 @@ func TestACP_CreateWithIdentityAndDeleteWithIdentity_CanDelete(t *testing.T) { - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users 
@policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -316,6 +319,7 @@ func TestACP_CreateWithIdentityAndDeleteWithoutIdentity_CanNotDelete(t *testing. Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -343,13 +347,13 @@ func TestACP_CreateWithIdentityAndDeleteWithoutIdentity_CanNotDelete(t *testing. - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -422,6 +426,7 @@ func TestACP_CreateWithIdentityAndDeleteWithWrongIdentity_CanNotDelete(t *testin Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -449,13 +454,13 @@ func TestACP_CreateWithIdentityAndDeleteWithWrongIdentity_CanNotDelete(t *testin - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String diff --git a/tests/integration/acp/register_and_read_test.go b/tests/integration/acp/register_and_read_test.go index 280d85637e..a60cd130b2 100644 --- a/tests/integration/acp/register_and_read_test.go +++ b/tests/integration/acp/register_and_read_test.go @@ -27,6 +27,7 @@ 
func TestACP_CreateWithoutIdentityAndReadWithoutIdentity_CanRead(t *testing.T) { Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -54,13 +55,13 @@ func TestACP_CreateWithoutIdentityAndReadWithoutIdentity_CanRead(t *testing.T) { - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -115,6 +116,7 @@ func TestACP_CreateWithoutIdentityAndReadWithIdentity_CanRead(t *testing.T) { Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -142,13 +144,13 @@ func TestACP_CreateWithoutIdentityAndReadWithIdentity_CanRead(t *testing.T) { - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -205,6 +207,7 @@ func TestACP_CreateWithIdentityAndReadWithIdentity_CanRead(t *testing.T) { Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -232,13 +235,13 @@ func TestACP_CreateWithIdentityAndReadWithIdentity_CanRead(t *testing.T) { - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: 
"94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -297,6 +300,7 @@ func TestACP_CreateWithIdentityAndReadWithoutIdentity_CanNotRead(t *testing.T) { Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -324,13 +328,13 @@ func TestACP_CreateWithIdentityAndReadWithoutIdentity_CanNotRead(t *testing.T) { - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -381,6 +385,7 @@ func TestACP_CreateWithIdentityAndReadWithWrongIdentity_CanNotRead(t *testing.T) Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -408,13 +413,13 @@ func TestACP_CreateWithIdentityAndReadWithWrongIdentity_CanNotRead(t *testing.T) - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String diff --git a/tests/integration/acp/register_and_update_test.go b/tests/integration/acp/register_and_update_test.go index e9ebb52e27..9394c5f012 100644 --- 
a/tests/integration/acp/register_and_update_test.go +++ b/tests/integration/acp/register_and_update_test.go @@ -33,6 +33,7 @@ func TestACP_CreateWithoutIdentityAndUpdateWithoutIdentity_CanUpdate(t *testing. Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -60,13 +61,13 @@ func TestACP_CreateWithoutIdentityAndUpdateWithoutIdentity_CanUpdate(t *testing. - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -138,6 +139,7 @@ func TestACP_CreateWithoutIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) Identity: Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -165,13 +167,13 @@ func TestACP_CreateWithoutIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -244,6 +246,7 @@ func TestACP_CreateWithIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) { Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -271,13 +274,13 @@ func TestACP_CreateWithIdentityAndUpdateWithIdentity_CanUpdate(t *testing.T) { - actor `, - 
ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -360,6 +363,7 @@ func TestACP_CreateWithIdentityAndUpdateWithoutIdentity_CanNotUpdate(t *testing. Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -387,13 +391,13 @@ func TestACP_CreateWithIdentityAndUpdateWithoutIdentity_CanNotUpdate(t *testing. - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -478,6 +482,7 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentity_CanNotUpdate(t *testin Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -505,13 +510,13 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentity_CanNotUpdate(t *testin - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -597,6 +602,7 @@ func 
TestACP_CreateWithIdentityAndUpdateWithoutIdentityGQL_CanNotUpdate(t *testi Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -624,13 +630,13 @@ func TestACP_CreateWithIdentityAndUpdateWithoutIdentityGQL_CanNotUpdate(t *testi - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String @@ -714,6 +720,7 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentityGQL_CanNotUpdate(t *tes Identity: OwnerIdentity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -741,13 +748,13 @@ func TestACP_CreateWithIdentityAndUpdateWithWrongIdentityGQL_CanNotUpdate(t *tes - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String diff --git a/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go b/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go index a7e9c835f2..aed2682b65 100644 --- a/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_basic_dpi_fmts_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_BasicYAML_SchemaAccepted(t *testing.T) { - policyIDOfValidDPI := 
"aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" + policyIDOfValidDPI := "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_BasicYAML_SchemaAccepted(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: a basic policy that satisfies minimum DPI requirements actor: @@ -113,7 +114,7 @@ func TestACP_AddDPISchema_BasicYAML_SchemaAccepted(t *testing.T) { } func TestACP_AddDPISchema_BasicJSON_SchemaAccepted(t *testing.T) { - policyIDOfValidDPI := "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" + policyIDOfValidDPI := "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262" test := testUtils.TestCase{ @@ -126,6 +127,7 @@ func TestACP_AddDPISchema_BasicJSON_SchemaAccepted(t *testing.T) { Policy: ` { + "name": "test", "description": "a basic policy that satisfies minimum DPI requirements", "resources": { "users": { diff --git a/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go index 275caf0864..a06e1e68f0 100644 --- a/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_extra_permissions_on_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelation_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "c137c80b1ad0fc52aa183c3b43dff62d1eefdd04cb0f49ca6a646b545843eece" + policyIDOfValidDPI := "c74076630d0b4efd8bfe6fd8ee91256de10598a8a2b7a49dd8ddc9aab3f6973c" test := testUtils.TestCase{ @@ -32,6 +32,7 @@ func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelation_AcceptSchema(t *t Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -118,7 +119,7 @@ func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelation_AcceptSchema(t *t } func 
TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelationInTheEnd_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "053f118041543b324f127a57a19e29c26aa95af8fa732ded2cf80e8dd96fa2d3" + policyIDOfValidDPI := "6990d33c4bfddef663a37a0177b28922ae42a3d988987d72c355865a7afbe96f" test := testUtils.TestCase{ @@ -131,6 +132,7 @@ func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelationInTheEnd_AcceptSch Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -217,7 +219,7 @@ func TestACP_AddDPISchema_WithExtraPermsHavingRequiredRelationInTheEnd_AcceptSch } func TestACP_AddDPISchema_WithExtraPermsHavingNoRequiredRelation_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "b1758de0d20726e53c9c343382af0f834ed6a10381f96399ce7c39fab607c349" + policyIDOfValidDPI := "05719e4bbd78a92c65758f2cb642bd58ad6d122d4aa3b8b2419bd307749f35bc" test := testUtils.TestCase{ @@ -230,6 +232,7 @@ func TestACP_AddDPISchema_WithExtraPermsHavingNoRequiredRelation_AcceptSchema(t Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go index c7b168aea7..0d7f05394f 100644 --- a/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_managed_relation_on_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_WithManagedRelation_AcceptSchemas(t *testing.T) { - policyIDOfValidDPI := "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd" + policyIDOfValidDPI := "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454" test := testUtils.TestCase{ @@ -32,6 +32,7 @@ func TestACP_AddDPISchema_WithManagedRelation_AcceptSchemas(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) 
actor: diff --git a/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go index 5c4f39c034..b97fda9e9f 100644 --- a/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_mixed_resources_on_partial_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_PartialValidDPIButUseOnlyValidDPIResource_AcceptSchema(t *testing.T) { - policyIDOfPartiallyValidDPI := "bfda7dc76b4719a32ff2ef6691646501d14fb139518ff6c05d4be1825b9128ed" + policyIDOfPartiallyValidDPI := "d0093bc2d984f42a498dff029df5c931bae5f8cb79b24d36243ab9b84547023d" test := testUtils.TestCase{ @@ -32,6 +32,7 @@ func TestACP_AddDPISchema_PartialValidDPIButUseOnlyValidDPIResource_AcceptSchema Identity: actor1Identity, Policy: ` + name: test description: A Partially Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go b/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go index 25e41408cf..15bb7358dd 100644 --- a/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_multi_dpis_test.go @@ -21,6 +21,7 @@ import ( func TestACP_AddDPISchema_AddDuplicateDPIsByOtherCreatorsUseBoth_AcceptSchema(t *testing.T) { const sameResourceNameOnBothDPI string = "users" const validDPIUsedByBoth string = ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -43,8 +44,8 @@ func TestACP_AddDPISchema_AddDuplicateDPIsByOtherCreatorsUseBoth_AcceptSchema(t - actor ` - const policyIDOfFirstCreatorsDPI string = "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" - const policyIDOfSecondCreatorsDPI string = "6d2ec2fd16ed62a1cad05d8e791abe12cbbf9551080c0ca052336b49e635c291" + const policyIDOfFirstCreatorsDPI string = 
"d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" + const policyIDOfSecondCreatorsDPI string = "4b9291094984289a8f5557d142db453943549626067eedd8cbd5b64c3bc8a4f3" test := testUtils.TestCase{ diff --git a/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go index 97f96d1acd..ed35d8ac91 100644 --- a/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/accept_multi_resources_on_dpi_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_WithMultipleResources_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "6209c5d12fce1fb0db4140ffa9d6b14a4d38133f601ab83f12dbb6ef84ee7da3" + policyIDOfValidDPI := "65d58052e3304b4dc564410b7ab27aeff5d68b5c676b05883f8799c0b1f7a795" test := testUtils.TestCase{ @@ -32,6 +32,7 @@ func TestACP_AddDPISchema_WithMultipleResources_AcceptSchema(t *testing.T) { Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -128,7 +129,7 @@ func TestACP_AddDPISchema_WithMultipleResources_AcceptSchema(t *testing.T) { } func TestACP_AddDPISchema_WithMultipleResourcesBothBeingUsed_AcceptSchema(t *testing.T) { - policyIDOfValidDPI := "6209c5d12fce1fb0db4140ffa9d6b14a4d38133f601ab83f12dbb6ef84ee7da3" + policyIDOfValidDPI := "65d58052e3304b4dc564410b7ab27aeff5d68b5c676b05883f8799c0b1f7a795" test := testUtils.TestCase{ @@ -141,6 +142,7 @@ func TestACP_AddDPISchema_WithMultipleResourcesBothBeingUsed_AcceptSchema(t *tes Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go b/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go index aaef9d915d..5113c5cd79 100644 --- a/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go +++ 
b/tests/integration/acp/schema/add_dpi/accept_same_resource_on_diff_schemas_test.go @@ -19,7 +19,7 @@ import ( ) func TestACP_AddDPISchema_UseSameResourceOnDifferentSchemas_AcceptSchemas(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" sharedSameResourceName := "users" test := testUtils.TestCase{ @@ -33,6 +33,7 @@ func TestACP_AddDPISchema_UseSameResourceOnDifferentSchemas_AcceptSchemas(t *tes Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go b/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go index 8174ccf7bc..0827374417 100644 --- a/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_empty_arg_on_schema_test.go @@ -17,7 +17,7 @@ import ( ) func TestACP_AddDPISchema_NoArgWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -30,6 +30,7 @@ func TestACP_AddDPISchema_NoArgWasSpecifiedOnSchema_SchemaRejected(t *testing.T) Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -91,7 +92,7 @@ func TestACP_AddDPISchema_NoArgWasSpecifiedOnSchema_SchemaRejected(t *testing.T) } func TestACP_AddDPISchema_SpecifiedArgsAreEmptyOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -104,6 +105,7 @@ func 
TestACP_AddDPISchema_SpecifiedArgsAreEmptyOnSchema_SchemaRejected(t *testin Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go index 028b42f4ac..1e31ce48b8 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_arg_type_on_schema_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_InvalidPolicyIDArgTypeWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_InvalidPolicyIDArgTypeWasSpecifiedOnSchema_SchemaRejec Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -92,7 +93,7 @@ func TestACP_AddDPISchema_InvalidPolicyIDArgTypeWasSpecifiedOnSchema_SchemaRejec } func TestACP_AddDPISchema_InvalidResourceArgTypeWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -105,6 +106,7 @@ func TestACP_AddDPISchema_InvalidResourceArgTypeWasSpecifiedOnSchema_SchemaRejec Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go index a6140d23d2..aeca7916c9 100644 --- 
a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "d3b58e284fb1117eb9c0111105195c910a6873d70695d8d3af3a967632bd5a5c" + policyIDOfInvalidDPI := "017d821ad9a540b639769fab6a58a5bf27591428f7acbf0d1ecf92dbcc4c4ba4" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionOnDPI_SchemaRejected Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -102,7 +103,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionOnDPI_SchemaRejected } func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionLabelOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "6f81aa1e33a6888e041ed7686f21007d09b30ae218fb2ce19071ea2155482b23" + policyIDOfInvalidDPI := "d56ef73738a0dd50fe1763c451ed3a9b774b38c268ed0c510058bb60a1df02de" test := testUtils.TestCase{ @@ -115,6 +116,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionLabelOnDPI_SchemaRej Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -185,7 +187,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredReadPermissionLabelOnDPI_SchemaRej } func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "7f291632f6772e108830d41a5388391fc04cd4f2473d2a759d2a3326ee973848" + policyIDOfInvalidDPI := "75f0320e2d949cd201ce40e556c2e7f615ae5c1039197329fc6ed4bca6c7135d" test := testUtils.TestCase{ @@ -198,6 +200,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionExprOnDPI_Sch Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -269,7 +272,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionExprOnDPI_Sch } 
func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionNoSpaceExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "aa4c2b7bc9177a7b1cb808289a7f03564c489cc5c829ef756997cbe47b689a3f" + policyIDOfInvalidDPI := "481cc0091eb0c30eb75e8676db564dadf3166fe617dcd576f34b38e4ef4797eb" test := testUtils.TestCase{ @@ -282,6 +285,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionNoSpaceExprOn Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -353,7 +357,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnReadPermissionNoSpaceExprOn } func TestACP_AddDPISchema_MaliciousOwnerSpecifiedOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "f52f9a23643c6d52f4aaf2569885c152f06edc7d95647f88f6c4e8ead757e792" + policyIDOfInvalidDPI := "79095d371699ab7c1aa0d7336645bafea4cd02d15af9c8bacf4d3ef6078488c6" test := testUtils.TestCase{ @@ -366,6 +370,7 @@ func TestACP_AddDPISchema_MaliciousOwnerSpecifiedOnReadPermissionExprOnDPI_Schem Identity: actor1Identity, Policy: ` + name: test description: a policy actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go index 5aaedbdcf3..97ea88b78f 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_read_perm_symbol_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "646063232aca1ae245d59f64e192436e1e843008f496c21eb4070d7d765f6f10" + policyIDOfInvalidDPI := "0fa5c11ee4323db5961207de897886f20320cffd41b7102dfbaf6bdf0cdd7f86" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnReadPermissionExprOn Identity: 
actor1Identity, Policy: ` + name: test description: a policy actor: @@ -103,7 +104,7 @@ func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnReadPermissionExprOn } func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "3252d478a953afc01782253abd47ad644e2784911ad4418acd802c9002a72c5a" + policyIDOfInvalidDPI := "56a7eb82f934d297548370e682e1f96751ac6163379fec5d21b5e18871e81c02" test := testUtils.TestCase{ @@ -116,6 +117,7 @@ func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnReadPermissionExpr Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -188,7 +190,7 @@ func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnReadPermissionExpr } func TestACP_AddDPISchema_OwnerRelationWithInvalidSetOpOnReadPermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "e4120157eaf6546994d7301deacb3f313ba7240a90dba9aeaa7b1227292f39cb" + policyIDOfInvalidDPI := "73837e44fc12215fd639a7572d81257a17f5c11446035331948c0a08375733d5" test := testUtils.TestCase{ @@ -201,6 +203,7 @@ func TestACP_AddDPISchema_OwnerRelationWithInvalidSetOpOnReadPermissionExprOnDPI Identity: actor1Identity, Policy: ` + name: test description: a policy actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go index 36fad20c9b..69fd6cfa5e 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "25bfbcab338ce03e1312b7a4dd78f4574156b5ca626c8f894ed101c81d2d31c8" + policyIDOfInvalidDPI := 
"d0f27cd0f75f3f3048bfa06e2a45f8dcfd172767437d48bf584eb3881f90c373" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionOnDPI_SchemaRejecte Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -102,7 +103,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionOnDPI_SchemaRejecte } func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionLabelOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "f0c8fc28378030d268ccdab9931e788d8839e0640085e43c1d48deebfe859f34" + policyIDOfInvalidDPI := "73c85066924b28fafb4103fc764ae35f6891a50292634df5a870f45525095bca" test := testUtils.TestCase{ @@ -115,6 +116,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionLabelOnDPI_SchemaRe Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -185,7 +187,7 @@ func TestACP_AddDPISchema_OwnerMissingRequiredWritePermissionLabelOnDPI_SchemaRe } func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "e94d4ca7705564e39014bec198c3e20f5fae86c4dd44297711bd6a2257e579dc" + policyIDOfInvalidDPI := "1e4cd20ab842fb837ed3e03c69014732593261ffc56142f7c907f32200f6d255" test := testUtils.TestCase{ @@ -198,6 +200,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionExprOnDPI_Sc Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -269,7 +272,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionExprOnDPI_Sc } func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionNoSpaceExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "2d185da7b2d40981ce325d71d1d21dbae87690a461d7cb5c4ac753ad213607a3" + policyIDOfInvalidDPI := "44267005866f1bef0613e442254c90491bd26625be54f6c5bec10a9b83cf6f05" test := testUtils.TestCase{ @@ -282,6 +285,7 @@ func 
TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionNoSpaceExprO Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -353,7 +357,7 @@ func TestACP_AddDPISchema_OwnerSpecifiedIncorrectlyOnWritePermissionNoSpaceExprO } func TestACP_AddDPISchema_MaliciousOwnerSpecifiedOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "2ad2dcd971d0e358a8d231970a3aa71b5cd94d38a85034c5cef10cd1c9fd5895" + policyIDOfInvalidDPI := "306fb2351dbe999e4f6f785f0f3ec353a389c82babedca49e375cca2d45b1ca8" test := testUtils.TestCase{ @@ -366,6 +370,7 @@ func TestACP_AddDPISchema_MaliciousOwnerSpecifiedOnWritePermissionExprOnDPI_Sche Identity: actor1Identity, Policy: ` + name: test description: a policy actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go index 07d719b920..b4699bd0c5 100644 --- a/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_invalid_owner_write_perm_symbol_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "e3fa9d82173e212e4d13e4f96b521dba16644c5dd1b07518fe2e93391ca1c087" + policyIDOfInvalidDPI := "20dcca408d23cb04e7c50b8fe59af10886496843b10466fd0c208d7a1b701594" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnWritePermissionExprO Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -103,7 +104,7 @@ func TestACP_AddDPISchema_OwnerRelationWithDifferenceSetOpOnWritePermissionExprO } func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := 
"5c12fb0c1d7e108f7144f9e21c830478f80d5a4b0108b35fefa17ad83f4aac15" + policyIDOfInvalidDPI := "eed2e57dad5c3265fa13330a8bcbfd0e51d194712fe66c031a9d1a993754e648" test := testUtils.TestCase{ @@ -116,6 +117,7 @@ func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnWritePermissionExp Identity: actor1Identity, Policy: ` + name: test description: a policy actor: @@ -188,7 +190,7 @@ func TestACP_AddDPISchema_OwnerRelationWithIntersectionSetOpOnWritePermissionExp } func TestACP_AddDPISchema_OwnerRelationWithInvalidSetOpOnWritePermissionExprOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "3c341a310b63bb689cf501598409f2a07a4b1798057d5634b8c47fe1efd094c9" + policyIDOfInvalidDPI := "5ddabf78f90cc7c1a2ef7dc9fa11cc7f7ed2b3875e5252a84613fb45173a5b58" test := testUtils.TestCase{ @@ -201,6 +203,7 @@ func TestACP_AddDPISchema_OwnerRelationWithInvalidSetOpOnWritePermissionExprOnDP Identity: actor1Identity, Policy: ` + name: test description: a policy actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go index 4aa2acb7bc..470e11a339 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_WhereNoPolicyWasAdded_SchemaRejected(t *testing.T) { - nonExistingPolicyID := "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" + nonExistingPolicyID := "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262" test := testUtils.TestCase{ @@ -68,8 +68,8 @@ func TestACP_AddDPISchema_WhereNoPolicyWasAdded_SchemaRejected(t *testing.T) { } func TestACP_AddDPISchema_WhereAPolicyWasAddedButLinkedPolicyWasNotAdded_SchemaRejected(t *testing.T) { - policyAdded := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" - incorrectPolicyID := "aa664afaf8dff947ba85f4d464662d595af6c1e2466bd11fd6b82ea95b547ea3" + 
policyAdded := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" + incorrectPolicyID := "66f3e364004a181e9b129f65dea317322d2285226e926d7e8cdfd644954e4262" test := testUtils.TestCase{ @@ -82,6 +82,7 @@ func TestACP_AddDPISchema_WhereAPolicyWasAddedButLinkedPolicyWasNotAdded_SchemaR Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go index 9f3d90d203..e7b1e55269 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_id_arg_on_schema_test.go @@ -17,7 +17,7 @@ import ( ) func TestACP_AddDPISchema_NoPolicyIDWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -30,6 +30,7 @@ func TestACP_AddDPISchema_NoPolicyIDWasSpecifiedOnSchema_SchemaRejected(t *testi Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -91,7 +92,7 @@ func TestACP_AddDPISchema_NoPolicyIDWasSpecifiedOnSchema_SchemaRejected(t *testi } func TestACP_AddDPISchema_SpecifiedPolicyIDArgIsEmptyOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -104,6 +105,7 @@ func TestACP_AddDPISchema_SpecifiedPolicyIDArgIsEmptyOnSchema_SchemaRejected(t * Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git 
a/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go index 22fb305b7b..f5af9d515e 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_perms_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_MissingRequiredReadPermissionOnDPI_SchemaRejected(t *testing.T) { - policyIDOfInvalidDPI := "07da6260811df769d551e89e02364b3e939cb585696c1a69b626bb8ecdd378f9" + policyIDOfInvalidDPI := "106a38bfb702608e26feda961d9fffd74141ef34eccc17b3de2c15dd7620da46" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_MissingRequiredReadPermissionOnDPI_SchemaRejected(t *t Identity: actor1Identity, Policy: ` + name: test description: A policy actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go index d8d67af51f..1d3110bacc 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_resource_arg_on_schema_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_NoResourceWasSpecifiedOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_NoResourceWasSpecifiedOnSchema_SchemaRejected(t *testi Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: @@ -94,7 +95,7 @@ func TestACP_AddDPISchema_NoResourceWasSpecifiedOnSchema_SchemaRejected(t *testi } func TestACP_AddDPISchema_SpecifiedResourceArgIsEmptyOnSchema_SchemaRejected(t *testing.T) { - policyIDOfValidDPI 
:= "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -107,6 +108,7 @@ func TestACP_AddDPISchema_SpecifiedResourceArgIsEmptyOnSchema_SchemaRejected(t * Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go index 8261c6759f..9ab5e3b184 100644 --- a/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_missing_resource_on_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_SpecifiedResourceDoesNotExistOnDPI_SchemaRejected(t *testing.T) { - policyIDOfValidDPI := "d5b240c738dba7fe7d8ae55acf257d8e4010c9d8b78e0b1f0bd26741b1ec5663" + policyIDOfValidDPI := "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_SpecifiedResourceDoesNotExistOnDPI_SchemaRejected(t *t Identity: actor1Identity, Policy: ` + name: test description: A Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go b/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go index b2bcbbf6ae..7896588f1b 100644 --- a/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go +++ b/tests/integration/acp/schema/add_dpi/reject_mixed_resources_on_partial_dpi_test.go @@ -18,7 +18,7 @@ import ( ) func TestACP_AddDPISchema_PartialValidDPIButUseInValidDPIResource_RejectSchema(t *testing.T) { - policyIDOfPartiallyValidDPI := "bfda7dc76b4719a32ff2ef6691646501d14fb139518ff6c05d4be1825b9128ed" + policyIDOfPartiallyValidDPI := 
"d0093bc2d984f42a498dff029df5c931bae5f8cb79b24d36243ab9b84547023d" test := testUtils.TestCase{ @@ -31,6 +31,7 @@ func TestACP_AddDPISchema_PartialValidDPIButUseInValidDPIResource_RejectSchema(t Identity: actor1Identity, Policy: ` + name: test description: A Partially Valid Defra Policy Interface (DPI) actor: diff --git a/tests/integration/collection_description/updates/remove/policy_test.go b/tests/integration/collection_description/updates/remove/policy_test.go index b52b8112c4..1f7020308f 100644 --- a/tests/integration/collection_description/updates/remove/policy_test.go +++ b/tests/integration/collection_description/updates/remove/policy_test.go @@ -25,6 +25,7 @@ func TestColDescrUpdateRemovePolicy_Errors(t *testing.T) { Identity: acpUtils.Actor1Identity, Policy: ` + name: test description: a test policy which marks a collection in a database as a resource actor: @@ -52,13 +53,13 @@ func TestColDescrUpdateRemovePolicy_Errors(t *testing.T) { - actor `, - ExpectedPolicyID: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + ExpectedPolicyID: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", }, testUtils.SchemaUpdate{ Schema: ` type Users @policy( - id: "a42e109f1542da3fef5f8414621a09aa4805bf1ac9ff32ad9940bd2c488ee6cd", + id: "94eb195c0e459aa79e02a1986c7e731c5015721c18a373f2b2a0ed140a04b454", resource: "users" ) { name: String From e3eca29e30f585a80078dfbdd97198e98010be20 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Wed, 19 Jun 2024 16:56:43 -0400 Subject: [PATCH 56/78] test: Allow test harness to execute benchmarks (#2740) ## Relevant issue(s) Resolves #2739 ## Description Allows test harness to execute benchmark tests as well as normal-tests. 
--- tests/change_detector/utils.go | 2 +- tests/integration/explain.go | 4 +-- tests/integration/explain_result_asserter.go | 4 +-- tests/integration/results.go | 4 +-- tests/integration/state.go | 4 +-- tests/integration/test_case.go | 6 ++--- tests/integration/utils2.go | 26 ++++++++++---------- 7 files changed, 25 insertions(+), 25 deletions(-) diff --git a/tests/change_detector/utils.go b/tests/change_detector/utils.go index 4e6e938aa5..5bc8794b95 100644 --- a/tests/change_detector/utils.go +++ b/tests/change_detector/utils.go @@ -76,7 +76,7 @@ func DatabaseDir(t testing.TB) string { } // PreTestChecks skips any test that can't be run by the change detector. -func PreTestChecks(t *testing.T, collectionNames []string) { +func PreTestChecks(t testing.TB, collectionNames []string) { if !Enabled { return } diff --git a/tests/integration/explain.go b/tests/integration/explain.go index da2adb69e5..fafab87134 100644 --- a/tests/integration/explain.go +++ b/tests/integration/explain.go @@ -375,7 +375,7 @@ func trimSubNodes(graph any) any { // trimExplainAttributes trims away all keys that aren't plan nodes within the explain graph. func trimExplainAttributes( - t *testing.T, + t testing.TB, description string, actualResult any, ) map[string]any { @@ -411,7 +411,7 @@ func trimExplainAttributes( // trimExplainAttributesArray is a helper that runs trimExplainAttributes for each item in an array. 
func trimExplainAttributesArray[T any]( - t *testing.T, + t testing.TB, description string, actualResult []T, ) []map[string]any { diff --git a/tests/integration/explain_result_asserter.go b/tests/integration/explain_result_asserter.go index 45f998e481..7d510bdc1a 100644 --- a/tests/integration/explain_result_asserter.go +++ b/tests/integration/explain_result_asserter.go @@ -41,7 +41,7 @@ type ExplainResultAsserter struct { planExecutions immutable.Option[uint64] } -func readNumberProp(t *testing.T, val any, prop string) uint64 { +func readNumberProp(t testing.TB, val any, prop string) uint64 { switch v := val.(type) { case uint64: return v @@ -55,7 +55,7 @@ func readNumberProp(t *testing.T, val any, prop string) uint64 { return 0 } -func (a *ExplainResultAsserter) Assert(t *testing.T, result []dataMap) { +func (a *ExplainResultAsserter) Assert(t testing.TB, result []dataMap) { require.Len(t, result, 1, "Expected len(result) = 1, got %d", len(result)) explainNode, ok := result[0]["explain"].(dataMap) require.True(t, ok, "Expected explain none") diff --git a/tests/integration/results.go b/tests/integration/results.go index 20270af1c4..b4fc9d5948 100644 --- a/tests/integration/results.go +++ b/tests/integration/results.go @@ -28,7 +28,7 @@ type AnyOf []any // assertResultsAnyOf asserts that actual result is equal to at least one of the expected results. // // The comparison is relaxed when using client types other than goClientType. -func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual any, msgAndArgs ...any) { +func assertResultsAnyOf(t testing.TB, client ClientType, expected AnyOf, actual any, msgAndArgs ...any) { switch client { case HTTPClientType, CLIClientType: if !areResultsAnyOf(expected, actual) { @@ -42,7 +42,7 @@ func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual // assertResultsEqual asserts that actual result is equal to the expected result. 
// // The comparison is relaxed when using client types other than goClientType. -func assertResultsEqual(t *testing.T, client ClientType, expected any, actual any, msgAndArgs ...any) { +func assertResultsEqual(t testing.TB, client ClientType, expected any, actual any, msgAndArgs ...any) { switch client { case HTTPClientType, CLIClientType: if !areResultsEqual(expected, actual) { diff --git a/tests/integration/state.go b/tests/integration/state.go index 49030c82a6..c92e3710ba 100644 --- a/tests/integration/state.go +++ b/tests/integration/state.go @@ -27,7 +27,7 @@ type state struct { ctx context.Context // The Go Test test state - t *testing.T + t testing.TB // The TestCase currently being executed. testCase TestCase @@ -88,7 +88,7 @@ type state struct { // newState returns a new fresh state for the given testCase. func newState( ctx context.Context, - t *testing.T, + t testing.TB, testCase TestCase, dbt DatabaseType, clientType ClientType, diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index 9b30dd5e35..487641c5ec 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -429,13 +429,13 @@ type GetIndexes struct { // assertions. type ResultAsserter interface { // Assert will be called with the test and the result of the request. 
- Assert(t *testing.T, result []map[string]any) + Assert(t testing.TB, result []map[string]any) } // ResultAsserterFunc is a function that can be used to implement the ResultAsserter -type ResultAsserterFunc func(*testing.T, []map[string]any) (bool, string) +type ResultAsserterFunc func(testing.TB, []map[string]any) (bool, string) -func (f ResultAsserterFunc) Assert(t *testing.T, result []map[string]any) { +func (f ResultAsserterFunc) Assert(t testing.TB, result []map[string]any) { f(t, result) } diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 041b553548..f7bceebd19 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -133,7 +133,7 @@ func AssertPanic(t *testing.T, f assert.PanicTestFunc) bool { // Will also attempt to detect incompatible changes in the persisted data if // configured to do so (the CI will do so, but disabled by default as it is slow). func ExecuteTestCase( - t *testing.T, + t testing.TB, testCase TestCase, ) { flattenActions(&testCase) @@ -181,7 +181,7 @@ func ExecuteTestCase( func executeTestCase( ctx context.Context, - t *testing.T, + t testing.TB, collectionNames []string, testCase TestCase, dbt DatabaseType, @@ -578,7 +578,7 @@ func flattenActions(testCase *TestCase) { // // If a SetupComplete action is provided, the actions will be split there, if not // they will be split at the first non SchemaUpdate/CreateDoc/UpdateDoc action. 
-func getActionRange(t *testing.T, testCase TestCase) (int, int) { +func getActionRange(t testing.TB, testCase TestCase) (int, int) { startIndex := 0 endIndex := len(testCase.Actions) - 1 @@ -927,7 +927,7 @@ func getIndexes( func assertIndexesListsEqual( expectedIndexes []client.IndexDescription, actualIndexes []client.IndexDescription, - t *testing.T, + t testing.TB, testDescription string, ) { toNames := func(indexes []client.IndexDescription) []string { @@ -956,7 +956,7 @@ func assertIndexesListsEqual( } func assertIndexesEqual(expectedIndex, actualIndex client.IndexDescription, - t *testing.T, + t testing.TB, testDescription string, ) { assert.Equal(t, expectedIndex.Name, actualIndex.Name, testDescription) @@ -1767,7 +1767,7 @@ func executeSubscriptionRequest( // Asserts as to whether an error has been raised as expected (or not). If an expected // error has been raised it will return true, returns false in all other cases. -func AssertError(t *testing.T, description string, err error, expectedError string) bool { +func AssertError(t testing.TB, description string, err error, expectedError string) bool { if err == nil { return false } @@ -1788,7 +1788,7 @@ func AssertError(t *testing.T, description string, err error, expectedError stri // Asserts as to whether an error has been raised as expected (or not). If an expected // error has been raised it will return true, returns false in all other cases. 
func AssertErrors( - t *testing.T, + t testing.TB, description string, errs []error, expectedError string, @@ -1888,7 +1888,7 @@ func assertRequestResults( return false } -func assertExpectedErrorRaised(t *testing.T, description string, expectedError string, wasRaised bool) { +func assertExpectedErrorRaised(t testing.TB, description string, expectedError string, wasRaised bool) { if expectedError != "" && !wasRaised { assert.Fail(t, "Expected an error however none was raised.", description) } @@ -1974,7 +1974,7 @@ func assertClientIntrospectionResults( // Asserts that the `actual` contains the given `contains` value according to the logic // described on the [RequestTestCase.ContainsData] property. -func assertContains(t *testing.T, contains map[string]any, actual map[string]any) { +func assertContains(t testing.TB, contains map[string]any, actual map[string]any) { for k, expected := range contains { innerActual := actual[k] if innerExpected, innerIsMap := expected.(map[string]any); innerIsMap { @@ -2005,7 +2005,7 @@ func assertContains(t *testing.T, contains map[string]any, actual map[string]any } } -func assertBackupContent(t *testing.T, expectedContent, filepath string) { +func assertBackupContent(t testing.TB, expectedContent, filepath string) { b, err := os.ReadFile(filepath) assert.NoError(t, err) assert.Equal( @@ -2017,7 +2017,7 @@ func assertBackupContent(t *testing.T, expectedContent, filepath string) { // skipIfMutationTypeUnsupported skips the current test if the given supportedMutationTypes option has value // and the active mutation type is not contained within that value set. 
-func skipIfMutationTypeUnsupported(t *testing.T, supportedMutationTypes immutable.Option[[]MutationType]) { +func skipIfMutationTypeUnsupported(t testing.TB, supportedMutationTypes immutable.Option[[]MutationType]) { if supportedMutationTypes.HasValue() { var isTypeSupported bool for _, supportedMutationType := range supportedMutationTypes.Value() { @@ -2038,7 +2038,7 @@ func skipIfMutationTypeUnsupported(t *testing.T, supportedMutationTypes immutabl // If supportedClientTypes is none no filtering will take place and the input client set will be returned. // If the resultant filtered set is empty the test will be skipped. func skipIfClientTypeUnsupported( - t *testing.T, + t testing.TB, clients []ClientType, supportedClientTypes immutable.Option[[]ClientType], ) []ClientType { @@ -2065,7 +2065,7 @@ func skipIfClientTypeUnsupported( // skipIfNetworkTest skips the current test if the given actions // contain network actions and skipNetworkTests is true. -func skipIfNetworkTest(t *testing.T, actions []any) { +func skipIfNetworkTest(t testing.TB, actions []any) { hasNetworkAction := false for _, act := range actions { switch act.(type) { From c7fc43db6b535d48ad2490d2725316fc29ad41bd Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Wed, 19 Jun 2024 23:57:15 -0400 Subject: [PATCH 57/78] fix: Add version check in basicTxn.Query (#2742) ## Relevant issue(s) Resolves #2741 ## Description This PR fixes a bug with the memory store where `basicTxn.Query` did not consider the transaction version when querying. This means that it could return KVs created after the transaction was initiated. Note that the first commit documents the bug. 
--- datastore/memory/txn.go | 6 ++- datastore/memory/txn_test.go | 2 +- datastore/txn_test.go | 92 ++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 2 deletions(-) diff --git a/datastore/memory/txn.go b/datastore/memory/txn.go index f1086332b6..27b6292d06 100644 --- a/datastore/memory/txn.go +++ b/datastore/memory/txn.go @@ -179,13 +179,17 @@ func (t *basicTxn) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) iter := t.ds.values.Iter() iterOps := t.ops.Iter() iterOpsHasValue := iterOps.Next() + dsVersion := t.getDSVersion() // iterate over the underlying store and ensure that ops with keys smaller than or equal to // the key of the underlying store are added with priority. for iter.Next() { // fast forward to last inserted version item := iter.Item() + if item.version > dsVersion { + continue + } for iter.Next() { - if item.key == iter.Item().key { + if item.key == iter.Item().key && iter.Item().version <= dsVersion { item = iter.Item() continue } diff --git a/datastore/memory/txn_test.go b/datastore/memory/txn_test.go index 5a0d1a2d8c..d1861e7d87 100644 --- a/datastore/memory/txn_test.go +++ b/datastore/memory/txn_test.go @@ -661,7 +661,7 @@ func TestTxnQueryWithOnlyOneOperation(t *testing.T) { tx, err := s.NewTransaction(ctx, false) require.NoError(t, err) - err = s.Put(ctx, testKey4, testValue4) + err = tx.Put(ctx, testKey4, testValue4) require.NoError(t, err) results, err := tx.Query(ctx, dsq.Query{}) diff --git a/datastore/txn_test.go b/datastore/txn_test.go index b11ca3acfe..cf3cdc5c1d 100644 --- a/datastore/txn_test.go +++ b/datastore/txn_test.go @@ -16,6 +16,7 @@ import ( "testing" ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" @@ -458,3 +459,94 @@ func TestBadgerFileStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithCon err = txn1.Commit(ctx) require.ErrorIs(t, err, badger.ErrConflict) } + +func 
TestMemoryStoreTxn_TwoTransactionsWithQueryAndPut_ShouldOmmitNewPut(t *testing.T) { + ctx := context.Background() + rootstore := memory.NewDatastore(ctx) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("other-key"), []byte("other-value")) + require.NoError(t, err) + + err = txn2.Commit(ctx) + require.NoError(t, err) + + qResults, err := txn1.Query(ctx, query.Query{}) + require.NoError(t, err) + + docs := [][]byte{} + for r := range qResults.Next() { + docs = append(docs, r.Entry.Value) + } + require.Equal(t, [][]byte{[]byte("value")}, docs) + txn1.Discard(ctx) +} + +func TestBadgerMemoryStoreTxn_TwoTransactionsWithQueryAndPut_ShouldOmmitNewPut(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + + rootstore.Put(ctx, ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("other-key"), []byte("other-value")) + require.NoError(t, err) + + err = txn2.Commit(ctx) + require.NoError(t, err) + + qResults, err := txn1.Query(ctx, query.Query{}) + require.NoError(t, err) + + docs := [][]byte{} + for r := range qResults.Next() { + docs = append(docs, r.Entry.Value) + } + require.Equal(t, [][]byte{[]byte("value")}, docs) + txn1.Discard(ctx) +} + +func TestBadgerFileStoreTxn_TwoTransactionsWithQueryAndPut_ShouldOmmitNewPut(t *testing.T) { + ctx := context.Background() + opts := badgerds.Options{Options: badger.DefaultOptions("")} + rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts) + require.NoError(t, err) + + rootstore.Put(ctx, 
ds.NewKey("key"), []byte("value")) + + txn1, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + txn2, err := rootstore.NewTransaction(ctx, false) + require.NoError(t, err) + + err = txn2.Put(ctx, ds.NewKey("other-key"), []byte("other-value")) + require.NoError(t, err) + + err = txn2.Commit(ctx) + require.NoError(t, err) + + qResults, err := txn1.Query(ctx, query.Query{}) + require.NoError(t, err) + + docs := [][]byte{} + for r := range qResults.Next() { + docs = append(docs, r.Entry.Value) + } + require.Equal(t, [][]byte{[]byte("value")}, docs) + txn1.Discard(ctx) +} From 3856b80211e834ac120e891ca8dd9a3c8db108f6 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Thu, 20 Jun 2024 10:04:51 -0700 Subject: [PATCH 58/78] refactor: Use events to test network logic (#2700) ## Relevant issue(s) Resolves #2699 Resolves #2687 ## Description This PR refactors the events package and updates the networking tests to use events. I think there is a bit more clean up to be done, but the majority should be ready for review. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
`make test` Specify the platform(s) on which this was tested: - MacOS --- cli/start.go | 2 - client/db.go | 4 +- client/mocks/db.go | 16 +- datastore/mocks/utils.go | 1 + event/bus.go | 163 +++++++++++++ event/bus_test.go | 179 ++++++++++++++ {events => event}/errors.go | 4 +- event/event.go | 112 +++++++++ events/dag_sync.go | 34 --- events/db_update.go | 45 ---- events/events.go | 55 ----- events/publisher.go | 68 ------ events/publisher_test.go | 81 ------- events/simple.go | 131 ---------- events/simple_test.go | 110 --------- http/client.go | 4 +- http/handler_ccip_test.go | 2 +- internal/db/collection.go | 27 +-- internal/db/collection_delete.go | 24 +- internal/db/config.go | 16 -- internal/db/config_test.go | 12 - internal/db/db.go | 32 +-- internal/db/merge.go | 24 +- internal/db/merge_test.go | 12 +- internal/db/subscriptions.go | 24 +- net/client.go | 4 +- net/client_test.go | 8 +- net/node.go | 249 ++----------------- net/node_test.go | 389 +----------------------------- net/peer.go | 55 ++--- net/peer_test.go | 70 ++---- net/server.go | 85 ++----- net/sync_dag.go | 3 + tests/clients/cli/wrapper.go | 12 +- tests/clients/clients.go | 2 - tests/clients/http/wrapper.go | 12 +- tests/integration/db.go | 4 - tests/integration/events/utils.go | 11 +- tests/integration/p2p.go | 94 ++++---- tests/integration/state.go | 5 + tests/integration/utils2.go | 19 +- 41 files changed, 706 insertions(+), 1498 deletions(-) create mode 100644 event/bus.go create mode 100644 event/bus_test.go rename {events => event}/errors.go (88%) create mode 100644 event/event.go delete mode 100644 events/dag_sync.go delete mode 100644 events/db_update.go delete mode 100644 events/events.go delete mode 100644 events/publisher.go delete mode 100644 events/publisher_test.go delete mode 100644 events/simple.go delete mode 100644 events/simple_test.go diff --git a/cli/start.go b/cli/start.go index ef0067abef..970b857aa0 100644 --- a/cli/start.go +++ b/cli/start.go @@ -63,8 +63,6 @@ func 
MakeStartCommand() *cobra.Command { node.WithACPType(node.LocalACPType), node.WithPeers(peers...), // db options - db.WithUpdateEvents(), - db.WithDAGMergeEvents(), db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")), // net node options net.WithListenAddresses(cfg.GetStringSlice("net.p2pAddresses")...), diff --git a/client/db.go b/client/db.go index 6ab945a815..e52dfed60a 100644 --- a/client/db.go +++ b/client/db.go @@ -18,7 +18,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" ) type CollectionName = string @@ -75,7 +75,7 @@ type DB interface { // // It may be used to monitor database events - a new event will be yielded for each mutation. // Note: it does not copy the queue, just the reference to it. - Events() events.Events + Events() *event.Bus // MaxTxnRetries returns the number of retries that this DefraDB instance has been configured to // make in the event of a transaction conflict in certain scenarios. 
diff --git a/client/mocks/db.go b/client/mocks/db.go index 4f0320f0c4..c56af31167 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -9,7 +9,7 @@ import ( datastore "github.com/sourcenetwork/defradb/datastore" - events "github.com/sourcenetwork/defradb/events" + event "github.com/sourcenetwork/defradb/event" go_datastore "github.com/ipfs/go-datastore" @@ -360,14 +360,16 @@ func (_c *DB_Close_Call) RunAndReturn(run func()) *DB_Close_Call { } // Events provides a mock function with given fields: -func (_m *DB) Events() events.Events { +func (_m *DB) Events() *event.Bus { ret := _m.Called() - var r0 events.Events - if rf, ok := ret.Get(0).(func() events.Events); ok { + var r0 *event.Bus + if rf, ok := ret.Get(0).(func() *event.Bus); ok { r0 = rf() } else { - r0 = ret.Get(0).(events.Events) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*event.Bus) + } } return r0 @@ -390,12 +392,12 @@ func (_c *DB_Events_Call) Run(run func()) *DB_Events_Call { return _c } -func (_c *DB_Events_Call) Return(_a0 events.Events) *DB_Events_Call { +func (_c *DB_Events_Call) Return(_a0 *event.Bus) *DB_Events_Call { _c.Call.Return(_a0) return _c } -func (_c *DB_Events_Call) RunAndReturn(run func() events.Events) *DB_Events_Call { +func (_c *DB_Events_Call) RunAndReturn(run func() *event.Bus) *DB_Events_Call { _c.Call.Return(run) return _c } diff --git a/datastore/mocks/utils.go b/datastore/mocks/utils.go index af91fc6d3a..50131a8538 100644 --- a/datastore/mocks/utils.go +++ b/datastore/mocks/utils.go @@ -68,6 +68,7 @@ func prepareDAGStore(t *testing.T) *DAGStore { func NewTxnWithMultistore(t *testing.T) *MultiStoreTxn { txn := NewTxn(t) txn.EXPECT().OnSuccess(mock.Anything).Maybe() + txn.EXPECT().OnSuccessAsync(mock.Anything).Maybe() result := &MultiStoreTxn{ Txn: txn, diff --git a/event/bus.go b/event/bus.go new file mode 100644 index 0000000000..371173686d --- /dev/null +++ b/event/bus.go @@ -0,0 +1,163 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is 
governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package event + +import ( + "sync" + "sync/atomic" +) + +type subscribeCommand *Subscription + +type unsubscribeCommand *Subscription + +type publishCommand Message + +type closeCommand struct{} + +// Bus uses a buffered channel to manage subscribers and publish messages. +type Bus struct { + // subID is incremented for each subscriber and used to set subscriber ids. + subID atomic.Uint64 + // subs is a mapping of subscriber ids to subscriptions. + subs map[uint64]*Subscription + // events is a mapping of event names to subscriber ids. + events map[Name]map[uint64]struct{} + // commandChannel manages all commands sent to the bufferedBus. + // + // It is important that all stuff gets sent through this single channel to ensure + // that the order of operations is preserved. + // + // This does mean that non-event commands can block the database if the buffer + // size is breached (e.g. if many subscribe commands occupy the buffer). + commandChannel chan any + eventBufferSize int + hasClosedChan chan struct{} + isClosed bool + // closeMutex is only locked when the bus is closing. + closeMutex sync.RWMutex +} + +// NewBus creates a new event bus with the given commandBufferSize and +// eventBufferSize. +// +// Should the buffers be filled, subsequent calls on this bus will block. 
+func NewBus(commandBufferSize int, eventBufferSize int) *Bus { + bus := Bus{ + subs: make(map[uint64]*Subscription), + events: make(map[Name]map[uint64]struct{}), + commandChannel: make(chan any, commandBufferSize), + hasClosedChan: make(chan struct{}), + eventBufferSize: eventBufferSize, + } + go bus.handleChannel() + return &bus +} + +// Publish broadcasts the given message to the bus subscribers. Non-blocking. +func (b *Bus) Publish(msg Message) { + b.closeMutex.RLock() + defer b.closeMutex.RUnlock() + + if b.isClosed { + return + } + b.commandChannel <- publishCommand(msg) +} + +// Subscribe returns a new subscription that will receive all of the events +// contained in the given list of events. +func (b *Bus) Subscribe(events ...Name) (*Subscription, error) { + b.closeMutex.RLock() + defer b.closeMutex.RUnlock() + + if b.isClosed { + return nil, ErrSubscribedToClosedChan + } + sub := &Subscription{ + id: b.subID.Add(1), + value: make(chan Message, b.eventBufferSize), + events: events, + } + b.commandChannel <- subscribeCommand(sub) + return sub, nil +} + +// Unsubscribe removes all event subscriptions and closes the subscription channel. +// +// Will do nothing if this object is already closed. +func (b *Bus) Unsubscribe(sub *Subscription) { + b.closeMutex.RLock() + defer b.closeMutex.RUnlock() + + if b.isClosed { + return + } + b.commandChannel <- unsubscribeCommand(sub) +} + +// Close unsubscribes all active subscribers and closes the command channel. 
+func (b *Bus) Close() { + b.closeMutex.Lock() + defer b.closeMutex.Unlock() + + if b.isClosed { + return + } + b.isClosed = true + b.commandChannel <- closeCommand{} + // Wait for the close command to be handled, in order, before returning + <-b.hasClosedChan +} + +func (b *Bus) handleChannel() { + for cmd := range b.commandChannel { + switch t := cmd.(type) { + case closeCommand: + for _, subscriber := range b.subs { + close(subscriber.value) + } + close(b.commandChannel) + close(b.hasClosedChan) + return + + case subscribeCommand: + for _, event := range t.events { + if _, ok := b.events[event]; !ok { + b.events[event] = make(map[uint64]struct{}) + } + b.events[event][t.id] = struct{}{} + } + b.subs[t.id] = t + + case unsubscribeCommand: + if _, ok := b.subs[t.id]; !ok { + continue // not subscribed + } + for _, event := range t.events { + delete(b.events[event], t.id) + } + delete(b.subs, t.id) + close(t.value) + + case publishCommand: + for id := range b.events[WildCardName] { + b.subs[id].value <- Message(t) + } + for id := range b.events[t.Name] { + if _, ok := b.events[WildCardName][id]; ok { + continue + } + b.subs[id].value <- Message(t) + } + } + } +} diff --git a/event/bus_test.go b/event/bus_test.go new file mode 100644 index 0000000000..b927e8bec4 --- /dev/null +++ b/event/bus_test.go @@ -0,0 +1,179 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package event + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBus_IfPublishingWithoutSubscribers_ItShouldNotBlock(t *testing.T) { + bus := NewBus(0, 0) + defer bus.Close() + + msg := NewMessage("test", 1) + bus.Publish(msg) + + // just assert that we reach this line, for the sake of having an assert + assert.True(t, true) +} + +func TestBus_IfClosingAfterSubscribing_ItShouldNotBlock(t *testing.T) { + bus := NewBus(0, 0) + defer bus.Close() + + sub, err := bus.Subscribe("test") + assert.NoError(t, err) + + bus.Close() + + <-sub.Message() + + // just assert that we reach this line, for the sake of having an assert + assert.True(t, true) +} + +func TestBus_IfSubscriptionIsUnsubscribedTwice_ItShouldNotPanic(t *testing.T) { + bus := NewBus(0, 0) + defer bus.Close() + + sub, err := bus.Subscribe(WildCardName) + assert.NoError(t, err) + + bus.Unsubscribe(sub) + bus.Unsubscribe(sub) +} + +func TestBus_IfSubscribedToWildCard_ItShouldNotReceiveMessageTwice(t *testing.T) { + bus := NewBus(0, 0) + defer bus.Close() + + sub, err := bus.Subscribe("test", WildCardName) + assert.NoError(t, err) + + msg := NewMessage("test", 1) + bus.Publish(msg) + + evt := <-sub.Message() + assert.Equal(t, evt, msg) + + select { + case <-sub.Message(): + t.Errorf("should not receive duplicate message") + case <-time.After(100 * time.Millisecond): + // message is deduplicated + } +} + +func TestBus_IfMultipleSubscriptionsToTheSameEvent_EachSubscriberRecievesEachEvent(t *testing.T) { + bus := NewBus(0, 0) + defer bus.Close() + + msg1 := NewMessage("test", 1) + msg2 := NewMessage("test", 2) + + sub1, err := bus.Subscribe("test") + assert.NoError(t, err) + + sub2, err := bus.Subscribe("test") + assert.NoError(t, err) + + // ordering of publish is not deterministic + // so capture each in a go routine + var wg sync.WaitGroup + var event1 Message + var event2 Message + + go func() { + event1 = <-sub1.Message() + wg.Done() + }() + + go func() { + 
event2 = <-sub2.Message() + wg.Done() + }() + + wg.Add(2) + bus.Publish(msg1) + wg.Wait() + + assert.Equal(t, msg1, event1) + assert.Equal(t, msg1, event2) + + go func() { + event1 = <-sub1.Message() + wg.Done() + }() + + go func() { + event2 = <-sub2.Message() + wg.Done() + }() + + wg.Add(2) + bus.Publish(msg2) + wg.Wait() + + assert.Equal(t, msg2, event1) + assert.Equal(t, msg2, event2) +} + +func TestBus_IfMultipleBufferedSubscribersWithMultipleEvents_EachSubscriberRecievesEachItem(t *testing.T) { + bus := NewBus(0, 2) + defer bus.Close() + + msg1 := NewMessage("test", 1) + msg2 := NewMessage("test", 2) + + sub1, err := bus.Subscribe("test") + assert.NoError(t, err) + sub2, err := bus.Subscribe("test") + assert.NoError(t, err) + + // both inputs are added first before read, using the internal chan buffer + bus.Publish(msg1) + bus.Publish(msg2) + + output1Ch1 := <-sub1.Message() + output1Ch2 := <-sub2.Message() + + output2Ch1 := <-sub1.Message() + output2Ch2 := <-sub2.Message() + + assert.Equal(t, msg1, output1Ch1) + assert.Equal(t, msg1, output1Ch2) + + assert.Equal(t, msg2, output2Ch1) + assert.Equal(t, msg2, output2Ch2) +} + +func TestBus_IfSubscribedThenUnsubscribe_SubscriptionShouldNotReceiveEvent(t *testing.T) { + bus := NewBus(0, 0) + defer bus.Close() + + sub, err := bus.Subscribe("test") + assert.NoError(t, err) + bus.Unsubscribe(sub) + + msg := NewMessage("test", 1) + bus.Publish(msg) + + // tiny delay to try and make sure the internal logic would have had time + // to do its thing with the pushed item. 
+ time.Sleep(5 * time.Millisecond) + + // closing the channel will result in reads yielding the default value + assert.Equal(t, Message{}, <-sub.Message()) +} diff --git a/events/errors.go b/event/errors.go similarity index 88% rename from events/errors.go rename to event/errors.go index bc0a06808d..68cac62308 100644 --- a/events/errors.go +++ b/event/errors.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2024 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package events +package event import ( "github.com/sourcenetwork/defradb/errors" diff --git a/event/event.go b/event/event.go new file mode 100644 index 0000000000..e9afdf1a57 --- /dev/null +++ b/event/event.go @@ -0,0 +1,112 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package event + +import ( + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/peer" +) + +// Name identifies an event +type Name string + +const ( + // WildCardName is the alias used to subscribe to all events. + WildCardName = Name("*") + // MergeName is the name of the net merge request event. + MergeName = Name("merge") + // MergeCompleteName is the name of the database merge complete event. + MergeCompleteName = Name("merge-complete") + // UpdateName is the name of the database update event. + UpdateName = Name("update") + // PubSubName is the name of the network pubsub event. 
+ PubSubName = Name("pubsub") + // PeerName is the name of the network connect event. + PeerName = Name("peer") +) + +// Peer is an event that is published when +// a peer connection has changed status. +type Peer = event.EvtPeerConnectednessChanged + +// PubSub is an event that is published when +// a pubsub message has been received from a remote peer. +type PubSub struct { + // Peer is the id of the peer that published the message. + Peer peer.ID +} + +// Update represents a new DAG node added to the append-only composite MerkleCRDT Clock graph +// of a document. +// +// It must only contain public elements not protected by ACP. +type Update struct { + // DocID is the unique immutable identifier of the document that was updated. + DocID string + + // Cid is the id of the composite commit that formed this update in the DAG. + Cid cid.Cid + + // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. + SchemaRoot string + + // Block is the encoded contents of this composite commit, it contains the Cids of the field level commits that + // also formed this update. + Block []byte + + // IsCreate is true if this update is the creation of a new document. + IsCreate bool +} + +// Merge is a notification that a merge can be performed up to the provided CID. +type Merge struct { + // DocID is the unique immutable identifier of the document that was updated. + DocID string + + // ByPeer is the id of the peer that created the push log request. + ByPeer peer.ID + + // FromPeer is the id of the peer that received the push log request. + FromPeer peer.ID + + // Cid is the id of the composite commit that formed this update in the DAG. + Cid cid.Cid + + // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. + SchemaRoot string +} + +// Message contains event info. +type Message struct { + // Name is the name of the event this message was generated from. 
+ Name Name + + // Data contains optional event information. + Data any +} + +// NewMessage returns a new message with the given name and optional data. +func NewMessage(name Name, data any) Message { + return Message{name, data} +} + +// Subscription is a read-only event stream. +type Subscription struct { + id uint64 + value chan Message + events []Name +} + +// Message returns the next event value from the subscription. +func (s *Subscription) Message() <-chan Message { + return s.value +} diff --git a/events/dag_sync.go b/events/dag_sync.go deleted file mode 100644 index d6150c9118..0000000000 --- a/events/dag_sync.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2024 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package events - -import ( - "sync" - - "github.com/ipfs/go-cid" - "github.com/sourcenetwork/immutable" -) - -// DAGMergeChannel is the bus onto which dag merge are published. -type DAGMergeChannel = immutable.Option[Channel[DAGMerge]] - -// DAGMerge is a notification that a merge can be performed up to the provided CID. -type DAGMerge struct { - // DocID is the unique identifier for the document being merged. - DocID string - // Cid is the id of the composite commit that formed this update in the DAG. - Cid cid.Cid - // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. - SchemaRoot string - // Wg is a wait group that can be used to synchronize the merge, - // allowing the caller to optionnaly block until the merge is complete. 
- Wg *sync.WaitGroup -} diff --git a/events/db_update.go b/events/db_update.go deleted file mode 100644 index 2b93752573..0000000000 --- a/events/db_update.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package events - -import ( - "github.com/ipfs/go-cid" - - "github.com/sourcenetwork/immutable" -) - -// UpdateChannel is the bus onto which updates are published. -type UpdateChannel = immutable.Option[Channel[Update]] - -// EmptyUpdateChannel is an empty UpdateChannel. -var EmptyUpdateChannel = immutable.None[Channel[Update]]() - -// UpdateEvent represents a new DAG node added to the append-only composite MerkleCRDT Clock graph -// of a document. -// -// It must only contain public elements not protected by ACP. -type Update struct { - // DocID is the unique immutable identifier of the document that was updated. - DocID string - - // Cid is the id of the composite commit that formed this update in the DAG. - Cid cid.Cid - - // SchemaRoot is the root identifier of the schema that defined the shape of the document that was updated. - SchemaRoot string - - // Block is the encoded contents of this composite commit, it contains the Cids of the field level commits that - // also formed this update. - Block []byte - - // IsCreate is true if this update is the creation of a new document. 
- IsCreate bool -} diff --git a/events/events.go b/events/events.go deleted file mode 100644 index ec29adc24c..0000000000 --- a/events/events.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -/* -Package events provides the internal event system. -*/ -package events - -type Subscription[T any] chan T - -// Channel represents a subscribable type that will expose inputted items to subscribers. -type Channel[T any] interface { - // Subscribe subscribes to the Channel, returning a channel by which events can - // be read from, or an error should one occur (e.g. if this object is closed). - // - // This function is non-blocking unless the subscription-buffer is full. - Subscribe() (Subscription[T], error) - - // Unsubscribe unsubscribes from the Channel, closing the provided channel. - // - // Will do nothing if this object is already closed. - Unsubscribe(Subscription[T]) - - // Publish pushes the given item into this channel. Non-blocking. - Publish(item T) - - // Close closes this Channel, and any owned or subscribing channels. - Close() -} - -var _ Channel[int] = (*simpleChannel[int])(nil) - -// New creates and returns a new Channel instance. -// -// At the moment this will always return a new simpleChannel, however that may change in -// the future as this feature gets fleshed out. -func New[T any](commandBufferSize int, eventBufferSize int) Channel[T] { - return NewSimpleChannel[T](commandBufferSize, eventBufferSize) -} - -// Events hold the supported event types -type Events struct { - // Updates publishes an `Update` for each document written to in the database. 
- Updates UpdateChannel - - // DAGMerges publishes a `DAGMerge` for each completed DAG sync process over P2P. - DAGMerges DAGMergeChannel -} diff --git a/events/publisher.go b/events/publisher.go deleted file mode 100644 index 2d2d93db60..0000000000 --- a/events/publisher.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package events - -import "time" - -// time limit we set for the client to read after publishing. -var clientTimeout = 60 * time.Second - -// Publisher hold a referance to the event channel, -// the associated subscription channel and the stream channel that -// returns data to the subscribed client -type Publisher[T any] struct { - ch Channel[T] - event Subscription[T] - stream chan any -} - -// NewPublisher creates a new Publisher with the given event Channel, subscribes to the -// event Channel and opens a new channel for the stream. -func NewPublisher[T any](ch Channel[T], streamBufferSize int) (*Publisher[T], error) { - evtCh, err := ch.Subscribe() - if err != nil { - return nil, err - } - - return &Publisher[T]{ - ch: ch, - event: evtCh, - stream: make(chan any, streamBufferSize), - }, nil -} - -// Event returns the subscription channel -func (p *Publisher[T]) Event() Subscription[T] { - return p.event -} - -// Stream returns the streaming channel -func (p *Publisher[T]) Stream() chan any { - return p.stream -} - -// Publish sends data to the streaming channel and unsubscribes if -// the client hangs for too long. 
-func (p *Publisher[T]) Publish(data any) { - select { - case p.stream <- data: - case <-time.After(clientTimeout): - // if sending to the client times out, we assume an inactive or problematic client and - // unsubscribe them from the event stream - p.Unsubscribe() - } -} - -// Unsubscribe unsubscribes the client for the event channel and closes the stream. -func (p *Publisher[T]) Unsubscribe() { - p.ch.Unsubscribe(p.event) - close(p.stream) -} diff --git a/events/publisher_test.go b/events/publisher_test.go deleted file mode 100644 index 97ff7b6255..0000000000 --- a/events/publisher_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package events - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestNewPublisher(t *testing.T) { - ch := startEventChanel() - - pub, err := NewPublisher(ch, 0) - if err != nil { - t.Fatal(err) - } - assert.NotNil(t, pub) -} - -func TestNewPublisherWithError(t *testing.T) { - ch := startEventChanel() - ch.Close() - _, err := NewPublisher(ch, 0) - assert.Error(t, err) -} - -func TestPublisherToStream(t *testing.T) { - ch := startEventChanel() - - pub, err := NewPublisher(ch, 1) - if err != nil { - t.Fatal(err) - } - assert.NotNil(t, pub) - - ch.Publish(10) - evt := <-pub.Event() - assert.Equal(t, 10, evt) - - pub.Publish(evt) - assert.Equal(t, 10, <-pub.Stream()) - - pub.Unsubscribe() - - _, open := <-pub.Stream() - assert.Equal(t, false, open) -} - -func TestPublisherToStreamWithTimeout(t *testing.T) { - clientTimeout = 1 * time.Second - ch := startEventChanel() - - pub, err := NewPublisher(ch, 0) - if err != nil { - t.Fatal(err) - } - assert.NotNil(t, pub) - - ch.Publish(10) - evt := <-pub.Event() - assert.Equal(t, 10, evt) - - pub.Publish(evt) - - _, open := <-pub.Stream() - assert.Equal(t, false, open) -} - -func startEventChanel() Channel[int] { - return New[int](0, 0) -} diff --git a/events/simple.go b/events/simple.go deleted file mode 100644 index bf247a7a16..0000000000 --- a/events/simple.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package events - -type simpleChannel[T any] struct { - subscribers []chan T - // commandChannel manages all commands sent to this simpleChannel. 
- // - // It is important that all stuff gets sent through this single channel to ensure - // that the order of operations is preserved. - // - // WARNING: This does mean that non-event commands can block the database if the buffer - // size is breached (e.g. if many subscribe commands occupy the buffer). - commandChannel chan any - eventBufferSize int - hasClosedChan chan struct{} - isClosed bool -} - -type subscribeCommand[T any] Subscription[T] - -type unsubscribeCommand[T any] Subscription[T] - -type publishCommand[T any] struct { - item T -} - -type closeCommand struct{} - -// NewSimpleChannel creates a new simpleChannel with the given commandBufferSize and -// eventBufferSize. -// -// Should the buffers be filled subsequent calls to functions on this object may start to block. -func NewSimpleChannel[T any](commandBufferSize int, eventBufferSize int) Channel[T] { - c := simpleChannel[T]{ - commandChannel: make(chan any, commandBufferSize), - hasClosedChan: make(chan struct{}), - eventBufferSize: eventBufferSize, - } - - go c.handleChannel() - - return &c -} - -func (c *simpleChannel[T]) Subscribe() (Subscription[T], error) { - if c.isClosed { - return nil, ErrSubscribedToClosedChan - } - - // It is important to set this buffer size too, else we may end up blocked in the handleChannel func - ch := make(chan T, c.eventBufferSize) - - c.commandChannel <- subscribeCommand[T](ch) - return ch, nil -} - -func (c *simpleChannel[T]) Unsubscribe(ch Subscription[T]) { - if c.isClosed { - return - } - c.commandChannel <- unsubscribeCommand[T](ch) -} - -func (c *simpleChannel[T]) Publish(item T) { - if c.isClosed { - return - } - c.commandChannel <- publishCommand[T]{item} -} - -func (c *simpleChannel[T]) Close() { - if c.isClosed { - return - } - c.isClosed = true - c.commandChannel <- closeCommand{} - - // Wait for the close command to be handled, in order, before returning - <-c.hasClosedChan -} - -func (c *simpleChannel[T]) handleChannel() { - for cmd := range 
c.commandChannel { - switch command := cmd.(type) { - case closeCommand: - for _, subscriber := range c.subscribers { - close(subscriber) - } - close(c.commandChannel) - close(c.hasClosedChan) - return - - case subscribeCommand[T]: - c.subscribers = append(c.subscribers, command) - - case unsubscribeCommand[T]: - var isFound bool - var index int - for i, subscriber := range c.subscribers { - if command == subscriber { - index = i - isFound = true - break - } - } - if !isFound { - continue - } - - // Remove channel from list of subscribers - c.subscribers[index] = c.subscribers[len(c.subscribers)-1] - c.subscribers = c.subscribers[:len(c.subscribers)-1] - - close(command) - - case publishCommand[T]: - for _, subscriber := range c.subscribers { - subscriber <- command.item - } - } - } -} diff --git a/events/simple_test.go b/events/simple_test.go deleted file mode 100644 index d4cc91047b..0000000000 --- a/events/simple_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package events - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestSimplePushIsNotBlockedWithoutSubscribers(t *testing.T) { - s := NewSimpleChannel[int](0, 0) - - s.Publish(1) - - // just assert that we reach this line, for the sake of having an assert - assert.True(t, true) -} - -func TestSimpleSubscribersAreNotBlockedAfterClose(t *testing.T) { - s := NewSimpleChannel[int](0, 0) - ch, err := s.Subscribe() - assert.Nil(t, err) - - s.Close() - - <-ch - - // just assert that we reach this line, for the sake of having an assert - assert.True(t, true) -} - -func TestSimpleEachSubscribersRecievesEachItem(t *testing.T) { - s := NewSimpleChannel[int](0, 0) - input1 := 1 - input2 := 2 - - ch1, err := s.Subscribe() - assert.Nil(t, err) - ch2, err := s.Subscribe() - assert.Nil(t, err) - - s.Publish(input1) - - output1Ch1 := <-ch1 - output1Ch2 := <-ch2 - - s.Publish(input2) - - output2Ch1 := <-ch1 - output2Ch2 := <-ch2 - - assert.Equal(t, input1, output1Ch1) - assert.Equal(t, input1, output1Ch2) - - assert.Equal(t, input2, output2Ch1) - assert.Equal(t, input2, output2Ch2) -} - -func TestSimpleEachSubscribersRecievesEachItemGivenBufferedEventChan(t *testing.T) { - s := NewSimpleChannel[int](0, 2) - input1 := 1 - input2 := 2 - - ch1, err := s.Subscribe() - assert.Nil(t, err) - ch2, err := s.Subscribe() - assert.Nil(t, err) - - // both inputs are added first before read, using the internal chan buffer - s.Publish(input1) - s.Publish(input2) - - output1Ch1 := <-ch1 - output1Ch2 := <-ch2 - - output2Ch1 := <-ch1 - output2Ch2 := <-ch2 - - assert.Equal(t, input1, output1Ch1) - assert.Equal(t, input1, output1Ch2) - - assert.Equal(t, input2, output2Ch1) - assert.Equal(t, input2, output2Ch2) -} - -func TestSimpleSubscribersDontRecieveItemsAfterUnsubscribing(t *testing.T) { - s := NewSimpleChannel[int](0, 0) - ch, err := s.Subscribe() - assert.Nil(t, err) - s.Unsubscribe(ch) - - s.Publish(1) - - // tiny delay to try and make sure the internal logic 
would have had time - // to do its thing with the pushed item. - time.Sleep(5 * time.Millisecond) - - // closing the channel will result in reads yielding the default value - assert.Equal(t, 0, <-ch) -} diff --git a/http/client.go b/http/client.go index 9792208214..2843ee4f2d 100644 --- a/http/client.go +++ b/http/client.go @@ -28,7 +28,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" ) var _ client.DB = (*Client)(nil) @@ -451,7 +451,7 @@ func (c *Client) Headstore() ds.Read { panic("client side database") } -func (c *Client) Events() events.Events { +func (c *Client) Events() *event.Bus { panic("client side database") } diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go index e17d8a882a..d35a910161 100644 --- a/http/handler_ccip_test.go +++ b/http/handler_ccip_test.go @@ -193,7 +193,7 @@ func TestCCIPPost_WithInvalidBody(t *testing.T) { func setupDatabase(t *testing.T) client.DB { ctx := context.Background() - cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), acp.NoACP, nil, db.WithUpdateEvents()) + cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), acp.NoACP, nil) require.NoError(t, err) _, err = cdb.AddSchema(ctx, `type User { diff --git a/internal/db/collection.go b/internal/db/collection.go index 19cb42cb86..7e20f0da8f 100644 --- a/internal/db/collection.go +++ b/internal/db/collection.go @@ -27,7 +27,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" @@ -675,21 +675,18 @@ func (c *collection) save( if err != nil { return cid.Undef, err } - if 
c.db.events.Updates.HasValue() { - txn.OnSuccess( - func() { - c.db.events.Updates.Value().Publish( - events.Update{ - DocID: doc.ID().String(), - Cid: link.Cid, - SchemaRoot: c.Schema().Root, - Block: headNode, - IsCreate: isCreate, - }, - ) - }, - ) + + // publish an update event when the txn succeeds + updateEvent := event.Update{ + DocID: doc.ID().String(), + Cid: link.Cid, + SchemaRoot: c.Schema().Root, + Block: headNode, + IsCreate: isCreate, } + txn.OnSuccess(func() { + c.db.events.Publish(event.NewMessage(event.UpdateName, updateEvent)) + }) txn.OnSuccess(func() { doc.SetHead(link.Cid) diff --git a/internal/db/collection_delete.go b/internal/db/collection_delete.go index 9612c4f42c..082a53caf2 100644 --- a/internal/db/collection_delete.go +++ b/internal/db/collection_delete.go @@ -17,7 +17,7 @@ import ( "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/merkle/clock" @@ -166,20 +166,16 @@ func (c *collection) applyDelete( return err } - if c.db.events.Updates.HasValue() { - txn.OnSuccess( - func() { - c.db.events.Updates.Value().Publish( - events.Update{ - DocID: primaryKey.DocID, - Cid: link.Cid, - SchemaRoot: c.Schema().Root, - Block: b, - }, - ) - }, - ) + // publish an update event if the txn succeeds + updateEvent := event.Update{ + DocID: primaryKey.DocID, + Cid: link.Cid, + SchemaRoot: c.Schema().Root, + Block: b, } + txn.OnSuccess(func() { + c.db.events.Publish(event.NewMessage(event.UpdateName, updateEvent)) + }) return nil } diff --git a/internal/db/config.go b/internal/db/config.go index 2debf41df9..8ce725ebd0 100644 --- a/internal/db/config.go +++ b/internal/db/config.go @@ -12,8 +12,6 @@ package db import ( "github.com/sourcenetwork/immutable" - - 
"github.com/sourcenetwork/defradb/events" ) const ( @@ -24,20 +22,6 @@ const ( // Option is a funtion that sets a config value on the db. type Option func(*db) -// WithUpdateEvents enables the update events channel. -func WithUpdateEvents() Option { - return func(db *db) { - db.events.Updates = immutable.Some(events.New[events.Update](0, updateEventBufferSize)) - } -} - -// WithDAGMergeEvents enables the dag merge events channel. -func WithDAGMergeEvents() Option { - return func(db *db) { - db.events.DAGMerges = immutable.Some(events.New[events.DAGMerge](0, updateEventBufferSize)) - } -} - // WithMaxRetries sets the maximum number of retries per transaction. func WithMaxRetries(num int) Option { return func(db *db) { diff --git a/internal/db/config_test.go b/internal/db/config_test.go index a08bd7815d..405e192598 100644 --- a/internal/db/config_test.go +++ b/internal/db/config_test.go @@ -16,18 +16,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestWithUpdateEvents(t *testing.T) { - d := &db{} - WithUpdateEvents()(d) - assert.NotNil(t, d.events.Updates) -} - -func TestWithDAGMergeEvents(t *testing.T) { - d := &db{} - WithDAGMergeEvents()(d) - assert.NotNil(t, d.events.DAGMerges) -} - func TestWithMaxRetries(t *testing.T) { d := &db{} WithMaxRetries(10)(d) diff --git a/internal/db/db.go b/internal/db/db.go index 277f102ace..a6fb37f643 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -29,7 +29,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/request/graphql" ) @@ -43,6 +43,13 @@ var ( _ client.Collection = (*collection)(nil) ) +const ( + // commandBufferSize is the size of the channel buffer used to handle events. 
+ commandBufferSize = 100_000 + // eventBufferSize is the size of the channel buffer used to subscribe to events. + eventBufferSize = 100 +) + // DB is the main interface for interacting with the // DefraDB storage system. type db struct { @@ -51,7 +58,7 @@ type db struct { rootstore datastore.RootStore multistore datastore.MultiStore - events events.Events + events *event.Bus parser core.Parser @@ -102,6 +109,7 @@ func newDB( lensRegistry: lens, parser: parser, options: options, + events: event.NewBus(commandBufferSize, eventBufferSize), } // apply options @@ -118,13 +126,11 @@ func newDB( return nil, err } - if db.events.DAGMerges.HasValue() { - merges, err := db.events.DAGMerges.Value().Subscribe() - if err != nil { - return nil, err - } - go db.handleMerges(ctx, merges) + sub, err := db.events.Subscribe(event.MergeName) + if err != nil { + return nil, err } + go db.handleMerges(ctx, sub) return db, nil } @@ -245,7 +251,7 @@ func (db *db) initialize(ctx context.Context) error { } // Events returns the events Channel. -func (db *db) Events() events.Events { +func (db *db) Events() *event.Bus { return db.events } @@ -267,12 +273,8 @@ func (db *db) PrintDump(ctx context.Context) error { // This is the place for any last minute cleanup or releasing of resources (i.e.: Badger instance). 
func (db *db) Close() { log.Info("Closing DefraDB process...") - if db.events.Updates.HasValue() { - db.events.Updates.Value().Close() - } - if db.events.DAGMerges.HasValue() { - db.events.DAGMerges.Value().Close() - } + + db.events.Close() err := db.rootstore.Close() if err != nil { diff --git a/internal/db/merge.go b/internal/db/merge.go index 7f78deb77e..e7d4c8252c 100644 --- a/internal/db/merge.go +++ b/internal/db/merge.go @@ -26,7 +26,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" @@ -34,16 +34,20 @@ import ( merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" ) -func (db *db) handleMerges(ctx context.Context, merges events.Subscription[events.DAGMerge]) { +func (db *db) handleMerges(ctx context.Context, sub *event.Subscription) { queue := newMergeQueue() for { select { case <-ctx.Done(): return - case merge, ok := <-merges: + case msg, ok := <-sub.Message(): if !ok { return } + merge, ok := msg.Data.(event.Merge) + if !ok { + continue + } go func() { // ensure only one merge per docID queue.add(merge.DocID) @@ -69,15 +73,12 @@ func (db *db) handleMerges(ctx context.Context, merges events.Subscription[event err, corelog.Any("Event", merge)) } - if merge.Wg != nil { - merge.Wg.Done() - } }() } } } -func (db *db) executeMerge(ctx context.Context, dagMerge events.DAGMerge) error { +func (db *db) executeMerge(ctx context.Context, dagMerge event.Merge) error { ctx, txn, err := ensureContextTxn(ctx, db, false) if err != nil { return err @@ -123,7 +124,14 @@ func (db *db) executeMerge(ctx context.Context, dagMerge events.DAGMerge) error return err } - return txn.Commit(ctx) + err = 
txn.Commit(ctx) + if err != nil { + return err + } + + // send a complete event so we can track merges in the integration tests + db.events.Publish(event.NewMessage(event.MergeCompleteName, dagMerge)) + return nil } // mergeQueue is synchronization source to ensure that concurrent diff --git a/internal/db/merge_test.go b/internal/db/merge_test.go index 9f0e0b34af..0c9a06ac42 100644 --- a/internal/db/merge_test.go +++ b/internal/db/merge_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/core/crdt" @@ -59,7 +59,7 @@ func TestMerge_SingleBranch_NoError(t *testing.T) { compInfo2, err := d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfo) require.NoError(t, err) - err = db.executeMerge(ctx, events.DAGMerge{ + err = db.executeMerge(ctx, event.Merge{ DocID: docID.String(), Cid: compInfo2.link.Cid, SchemaRoot: col.SchemaRoot(), @@ -104,7 +104,7 @@ func TestMerge_DualBranch_NoError(t *testing.T) { compInfo2, err := d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfo) require.NoError(t, err) - err = db.executeMerge(ctx, events.DAGMerge{ + err = db.executeMerge(ctx, event.Merge{ DocID: docID.String(), Cid: compInfo2.link.Cid, SchemaRoot: col.SchemaRoot(), @@ -114,7 +114,7 @@ func TestMerge_DualBranch_NoError(t *testing.T) { compInfo3, err := d.generateCompositeUpdate(&lsys, map[string]any{"age": 30}, compInfo) require.NoError(t, err) - err = db.executeMerge(ctx, events.DAGMerge{ + err = db.executeMerge(ctx, event.Merge{ DocID: docID.String(), Cid: compInfo3.link.Cid, SchemaRoot: col.SchemaRoot(), @@ -162,7 +162,7 @@ func TestMerge_DualBranchWithOneIncomplete_CouldNotFindCID(t *testing.T) { compInfo2, err := 
d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfo) require.NoError(t, err) - err = db.executeMerge(ctx, events.DAGMerge{ + err = db.executeMerge(ctx, event.Merge{ DocID: docID.String(), Cid: compInfo2.link.Cid, SchemaRoot: col.SchemaRoot(), @@ -181,7 +181,7 @@ func TestMerge_DualBranchWithOneIncomplete_CouldNotFindCID(t *testing.T) { compInfo3, err := d.generateCompositeUpdate(&lsys, map[string]any{"name": "Johny"}, compInfoUnkown) require.NoError(t, err) - err = db.executeMerge(ctx, events.DAGMerge{ + err = db.executeMerge(ctx, event.Merge{ DocID: docID.String(), Cid: compInfo3.link.Cid, SchemaRoot: col.SchemaRoot(), diff --git a/internal/db/subscriptions.go b/internal/db/subscriptions.go index a1b0147df4..491ae44468 100644 --- a/internal/db/subscriptions.go +++ b/internal/db/subscriptions.go @@ -15,7 +15,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/planner" ) @@ -26,36 +26,36 @@ func (db *db) handleSubscription(ctx context.Context, r *request.Request) (<-cha if len(r.Subscription) == 0 || len(r.Subscription[0].Selections) == 0 { return nil, nil // This is not a subscription request and we have nothing to do here } - if !db.events.Updates.HasValue() { - return nil, ErrSubscriptionsNotAllowed - } selections := r.Subscription[0].Selections[0] subRequest, ok := selections.(*request.ObjectSubscription) if !ok { return nil, client.NewErrUnexpectedType[request.ObjectSubscription]("SubscriptionSelection", selections) } - // unsubscribing from this publisher will cause a race condition - // https://github.com/sourcenetwork/defradb/issues/2687 - pub, err := events.NewPublisher(db.events.Updates.Value(), 5) + sub, err := db.events.Subscribe(event.UpdateName) if err != nil { return nil, err } - resCh := make(chan client.GQLResult) go func() { - defer close(resCh) 
+ defer func() { + db.events.Unsubscribe(sub) + close(resCh) + }() // listen for events and send to the result channel for { - var evt events.Update + var evt event.Update select { case <-ctx.Done(): return // context cancelled - case val, ok := <-pub.Event(): + case val, ok := <-sub.Message(): if !ok { return // channel closed } - evt = val + evt, ok = val.Data.(event.Update) + if !ok { + continue // invalid event value + } } txn, err := db.NewTxn(ctx, false) diff --git a/net/client.go b/net/client.go index d29af6f60d..9930710891 100644 --- a/net/client.go +++ b/net/client.go @@ -19,7 +19,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -31,7 +31,7 @@ var ( // pushLog creates a pushLog request and sends it to another node // over libp2p grpc connection -func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) error { +func (s *server) pushLog(ctx context.Context, evt event.Update, pid peer.ID) error { body := &pb.PushLogRequest_Body{ DocID: []byte(evt.DocID), Cid: evt.Cid.Bytes(), diff --git a/net/client_test.go b/net/client_test.go index 6e85a516be..6a43805ae8 100644 --- a/net/client_test.go +++ b/net/client_test.go @@ -19,7 +19,7 @@ import ( "google.golang.org/grpc" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" ) var def = client.CollectionDefinition{ @@ -62,7 +62,7 @@ func TestPushlogWithDialFailure(t *testing.T) { grpc.WithCredentialsBundle(nil), ) - err = n.server.pushLog(ctx, events.Update{ + err = n.server.pushLog(ctx, event.Update{ DocID: id.String(), Cid: cid, SchemaRoot: "test", @@ -84,7 +84,7 @@ func TestPushlogWithInvalidPeerID(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - err = n.server.pushLog(ctx, events.Update{ + err = n.server.pushLog(ctx, 
event.Update{ DocID: id.String(), Cid: cid, SchemaRoot: "test", @@ -135,7 +135,7 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { b, err := n1.db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) - err = n1.server.pushLog(ctx, events.Update{ + err = n1.server.pushLog(ctx, event.Update{ DocID: doc.ID().String(), Cid: headCID, SchemaRoot: col.SchemaRoot(), diff --git a/net/node.go b/net/node.go index 70fd12c016..3338ac0f04 100644 --- a/net/node.go +++ b/net/node.go @@ -31,9 +31,8 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" record "github.com/libp2p/go-libp2p-record" libp2pCrypto "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/event" + libp2pEvent "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" "github.com/multiformats/go-multiaddr" @@ -48,10 +47,9 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/crypto" + "github.com/sourcenetwork/defradb/event" ) -var evtWaitTimeout = 10 * time.Second - var _ client.P2P = (*Node)(nil) // Node is a networked peer instance of DefraDB. @@ -61,15 +59,6 @@ type Node struct { *Peer - // receives an event when the status of a peer connection changes. - peerEvent chan event.EvtPeerConnectednessChanged - - // receives an event when a pubsub topic is added. - pubSubEvent chan EvtPubSub - - // receives an event when a pushLog request has been processed. 
- pushLogEvent chan EvtReceivedPushLog - ctx context.Context cancel context.CancelFunc dhtClose func() error @@ -178,6 +167,7 @@ func NewNode( return nil, fin.Cleanup(err) } } + peer, err := NewPeer( ctx, db, @@ -191,28 +181,26 @@ func NewNode( return nil, fin.Cleanup(err) } - n := &Node{ - // WARNING: The current usage of these channels means that consumers of them - // (the WaitForFoo funcs) can recieve events that occured before the WaitForFoo - // function call. This is tolerable at the moment as they are only used for - // test, but we should resolve this when we can (e.g. via using subscribe-like - // mechanics, potentially via use of a ring-buffer based [events.Channel] - // implementation): https://github.com/sourcenetwork/defradb/issues/1358. - pubSubEvent: make(chan EvtPubSub, 20), - pushLogEvent: make(chan EvtReceivedPushLog, 20), - peerEvent: make(chan event.EvtPeerConnectednessChanged, 20), - Peer: peer, - DB: db, - ctx: ctx, - cancel: cancel, - dhtClose: ddht.Close, + sub, err := h.EventBus().Subscribe(&libp2pEvent.EvtPeerConnectednessChanged{}) + if err != nil { + return nil, fin.Cleanup(err) } + // publish subscribed events to the event bus + go func() { + for val := range sub.Out() { + db.Events().Publish(event.NewMessage(event.PeerName, val)) + } + }() - n.subscribeToPeerConnectionEvents() - n.subscribeToPubSubEvents() - n.subscribeToPushLogEvents() + node = &Node{ + Peer: peer, + DB: db, + ctx: ctx, + cancel: cancel, + dhtClose: ddht.Close, + } - return n, nil + return } // Bootstrap connects to the given peers. @@ -262,203 +250,6 @@ func (n *Node) PeerInfo() peer.AddrInfo { } } -// subscribeToPeerConnectionEvents subscribes the node to the event bus for a peer connection change. 
-func (n *Node) subscribeToPeerConnectionEvents() { - sub, err := n.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged)) - if err != nil { - log.InfoContext( - n.ctx, - fmt.Sprintf("failed to subscribe to peer connectedness changed event: %v", err), - ) - return - } - go func() { - for { - select { - case <-n.ctx.Done(): - err := sub.Close() - if err != nil { - log.ErrorContextE( - n.ctx, - "Failed to close peer connectedness changed event subscription", - err, - ) - } - return - case e, ok := <-sub.Out(): - if !ok { - return - } - select { - case n.peerEvent <- e.(event.EvtPeerConnectednessChanged): - default: - <-n.peerEvent - n.peerEvent <- e.(event.EvtPeerConnectednessChanged) - } - } - } - }() -} - -// subscribeToPubSubEvents subscribes the node to the event bus for a pubsub. -func (n *Node) subscribeToPubSubEvents() { - sub, err := n.host.EventBus().Subscribe(new(EvtPubSub)) - if err != nil { - log.InfoContext( - n.ctx, - fmt.Sprintf("failed to subscribe to pubsub event: %v", err), - ) - return - } - go func() { - for { - select { - case <-n.ctx.Done(): - err := sub.Close() - if err != nil { - log.ErrorContextE( - n.ctx, - "Failed to close pubsub event subscription", - err, - ) - } - return - case e, ok := <-sub.Out(): - if !ok { - return - } - select { - case n.pubSubEvent <- e.(EvtPubSub): - default: - <-n.pubSubEvent - n.pubSubEvent <- e.(EvtPubSub) - } - } - } - }() -} - -// subscribeToPushLogEvents subscribes the node to the event bus for a push log request completion. 
-func (n *Node) subscribeToPushLogEvents() { - sub, err := n.host.EventBus().Subscribe(new(EvtReceivedPushLog)) - if err != nil { - log.InfoContext( - n.ctx, - fmt.Sprintf("failed to subscribe to push log event: %v", err), - ) - return - } - go func() { - for { - select { - case <-n.ctx.Done(): - err := sub.Close() - if err != nil { - log.ErrorContextE( - n.ctx, - "Failed to close push log event subscription", - err, - ) - } - return - case e, ok := <-sub.Out(): - if !ok { - return - } - select { - case n.pushLogEvent <- e.(EvtReceivedPushLog): - default: - <-n.pushLogEvent - n.pushLogEvent <- e.(EvtReceivedPushLog) - } - } - } - }() -} - -// WaitForPeerConnectionEvent listens to the event channel for a connection event from a given peer. -func (n *Node) WaitForPeerConnectionEvent(id peer.ID) error { - if n.host.Network().Connectedness(id) == network.Connected { - return nil - } - for { - select { - case evt := <-n.peerEvent: - if evt.Peer != id { - continue - } - return nil - case <-time.After(evtWaitTimeout): - return ErrPeerConnectionWaitTimout - case <-n.ctx.Done(): - return nil - } - } -} - -// WaitForPubSubEvent listens to the event channel for pub sub event from a given peer. -func (n *Node) WaitForPubSubEvent(id peer.ID) error { - for { - select { - case evt := <-n.pubSubEvent: - if evt.Peer != id { - continue - } - return nil - case <-time.After(evtWaitTimeout): - return ErrPubSubWaitTimeout - case <-n.ctx.Done(): - return nil - } - } -} - -// WaitForPushLogByPeerEvent listens to the event channel for a push log event by a given peer. -// -// By refers to the log creator. It can be different than the log sender. -// -// It will block the calling thread until an event is yielded to an internal channel. This -// event is not necessarily the next event and is dependent on the number of concurrent callers -// (each event will only notify a single caller, not all of them). 
-func (n *Node) WaitForPushLogByPeerEvent(id peer.ID) error { - for { - select { - case evt := <-n.pushLogEvent: - if evt.ByPeer != id { - continue - } - return nil - case <-time.After(evtWaitTimeout): - return ErrPushLogWaitTimeout - case <-n.ctx.Done(): - return nil - } - } -} - -// WaitForPushLogFromPeerEvent listens to the event channel for a push log event from a given peer. -// -// From refers to the log sender. It can be different that the log creator. -// -// It will block the calling thread until an event is yielded to an internal channel. This -// event is not necessarily the next event and is dependent on the number of concurrent callers -// (each event will only notify a single caller, not all of them). -func (n *Node) WaitForPushLogFromPeerEvent(id peer.ID) error { - for { - select { - case evt := <-n.pushLogEvent: - if evt.FromPeer != id { - continue - } - return nil - case <-time.After(evtWaitTimeout): - return ErrPushLogWaitTimeout - case <-n.ctx.Done(): - return nil - } - } -} - func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, error) { dhtOpts := []dualdht.Option{ dualdht.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})), diff --git a/net/node_test.go b/net/node_test.go index 55b0573474..f04e7c6bac 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -13,9 +13,7 @@ package net import ( "context" "testing" - "time" - "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/peer" badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" @@ -36,7 +34,7 @@ func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} rootstore, err := badgerds.NewDatastore("", &opts) require.NoError(t, err) - database, err = db.NewDB(ctx, rootstore, acp.NoACP, nil, db.WithUpdateEvents()) + database, err = db.NewDB(ctx, rootstore, acp.NoACP, nil) require.NoError(t, err) return database } @@ -44,7 
+42,7 @@ func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { func TestNewNode_WithEnableRelay_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -59,7 +57,7 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) db.Close() @@ -73,7 +71,7 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { func TestNewNode_NoPubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -88,7 +86,7 @@ func TestNewNode_NoPubSub_NoError(t *testing.T) { func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n, err := NewNode( @@ -106,7 +104,7 @@ func TestNewNode_WithEnablePubSub_NoError(t *testing.T) { func TestNodeClose_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -119,7 +117,7 @@ func TestNodeClose_NoError(t *testing.T) { func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, 
acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n1, err := NewNode( @@ -135,7 +133,7 @@ func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n1, err := NewNode( @@ -162,7 +160,7 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n1, err := NewNode( @@ -192,7 +190,7 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing func TestListenAddrs_WithListenAddresses_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) n, err := NewNode( context.Background(), @@ -204,370 +202,3 @@ func TestListenAddrs_WithListenAddresses_NoError(t *testing.T) { require.Contains(t, n.ListenAddrs()[0].String(), "/tcp/") } - -func TestPeerConnectionEventEmitter_MultiEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) - require.NoError(t, err) - - // the emitter can take 20 events in the channel. This tests what happens whe go over the 20 events. 
- for i := 0; i < 21; i++ { - err = emitter.Emit(event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) - } -} - -func TestSubscribeToPubSubEvents_SubscriptionError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - n.Peer.host = &mockHost{n.Peer.host} - - n.subscribeToPubSubEvents() -} - -func TestPubSubEventEmitter_MultiEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) - require.NoError(t, err) - - // the emitter can take 20 events in the channel. This tests what happens whe go over the 20 events. - for i := 0; i < 21; i++ { - err = emitter.Emit(EvtPubSub{}) - require.NoError(t, err) - } -} - -func TestSubscribeToPushLogEvents_SubscriptionError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - n.Peer.host = &mockHost{n.Peer.host} - - n.subscribeToPushLogEvents() -} - -func TestPushLogEventEmitter_SingleEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - err = emitter.Emit(EvtReceivedPushLog{}) - require.NoError(t, err) -} - -func TestPushLogEventEmitter_MultiEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - // the emitter can take 20 events in the channel. This tests what happens whe go over the 20 events. 
- for i := 0; i < 21; i++ { - err = emitter.Emit(EvtReceivedPushLog{}) - require.NoError(t, err) - } -} - -func TestWaitForPeerConnectionEvent_WithSamePeer_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) - require.NoError(t, err) - - err = emitter.Emit(event.EvtPeerConnectednessChanged{ - Peer: n.PeerID(), - }) - require.NoError(t, err) - - err = n.WaitForPeerConnectionEvent(n.PeerID()) - require.NoError(t, err) -} - -func TestWaitForPeerConnectionEvent_WithDifferentPeer_TimeoutError(t *testing.T) { - evtWaitTimeout = 100 * time.Microsecond - defer func() { - evtWaitTimeout = 10 * time.Second - }() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) - require.NoError(t, err) - - err = emitter.Emit(event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) - - err = n.WaitForPeerConnectionEvent(n.PeerID()) - require.ErrorIs(t, err, ErrPeerConnectionWaitTimout) -} - -func TestWaitForPeerConnectionEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) - require.NoError(t, err) - - err = emitter.Emit(event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) - - n.cancel() - - err = n.WaitForPeerConnectionEvent(n.PeerID()) - require.NoError(t, err) -} - -func TestWaitForPubSubEvent_WithSamePeer_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer 
n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) - require.NoError(t, err) - - err = emitter.Emit(EvtPubSub{ - Peer: n.PeerID(), - }) - require.NoError(t, err) - - err = n.WaitForPubSubEvent(n.PeerID()) - require.NoError(t, err) -} - -func TestWaitForPubSubEvent_WithDifferentPeer_TimeoutError(t *testing.T) { - evtWaitTimeout = 100 * time.Microsecond - defer func() { - evtWaitTimeout = 10 * time.Second - }() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) - require.NoError(t, err) - - err = emitter.Emit(EvtPubSub{}) - require.NoError(t, err) - - err = n.WaitForPubSubEvent(n.PeerID()) - require.ErrorIs(t, err, ErrPubSubWaitTimeout) -} - -func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) - require.NoError(t, err) - - err = emitter.Emit(EvtPubSub{}) - require.NoError(t, err) - - n.cancel() - - err = n.WaitForPubSubEvent(n.PeerID()) - require.NoError(t, err) -} - -func TestWaitForPushLogByPeerEvent_WithSamePeer_NoError(t *testing.T) { - ctx := context.Background() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - ctx, - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - err = emitter.Emit(EvtReceivedPushLog{ - ByPeer: n.PeerID(), - }) - require.NoError(t, err) - - err = n.WaitForPushLogByPeerEvent(n.PeerID()) - require.NoError(t, err) -} - -func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) { - evtWaitTimeout = 100 * time.Microsecond - defer func() { - evtWaitTimeout = 10 * time.Second - }() - ctx := 
context.Background() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - ctx, - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - err = emitter.Emit(EvtReceivedPushLog{}) - require.NoError(t, err) - - err = n.WaitForPushLogByPeerEvent(n.PeerID()) - require.ErrorIs(t, err, ErrPushLogWaitTimeout) -} - -func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { - ctx := context.Background() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - ctx, - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - err = emitter.Emit(EvtReceivedPushLog{}) - require.NoError(t, err) - - n.cancel() - - err = n.WaitForPushLogByPeerEvent(n.PeerID()) - require.NoError(t, err) -} - -func TestWaitForPushLogFromPeerEvent_WithSamePeer_NoError(t *testing.T) { - ctx := context.Background() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - ctx, - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - err = emitter.Emit(EvtReceivedPushLog{ - FromPeer: n.PeerID(), - }) - require.NoError(t, err) - - err = n.WaitForPushLogFromPeerEvent(n.PeerID()) - require.NoError(t, err) -} - -func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) { - evtWaitTimeout = 100 * time.Microsecond - defer func() { - evtWaitTimeout = 10 * time.Second - }() - ctx := context.Background() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - ctx, - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - err = emitter.Emit(EvtReceivedPushLog{}) - require.NoError(t, err) - - err = 
n.WaitForPushLogFromPeerEvent(n.PeerID()) - require.ErrorIs(t, err, ErrPushLogWaitTimeout) -} - -func TestWaitForPushLogFromPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { - ctx := context.Background() - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - ctx, - db, - ) - require.NoError(t, err) - defer n.Close() - - emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) - require.NoError(t, err) - - err = emitter.Emit(EvtReceivedPushLog{}) - require.NoError(t, err) - - n.cancel() - - err = n.WaitForPushLogFromPeerEvent(n.PeerID()) - require.NoError(t, err) -} diff --git a/net/peer.go b/net/peer.go index a6ca285e2c..adb749de70 100644 --- a/net/peer.go +++ b/net/peer.go @@ -35,7 +35,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" corenet "github.com/sourcenetwork/defradb/internal/core/net" "github.com/sourcenetwork/defradb/internal/merkle/clock" @@ -47,8 +47,8 @@ import ( type Peer struct { //config?? 
- db client.DB - updateChannel chan events.Update + db client.DB + updateSub *event.Subscription host host.Host dht routing.Routing @@ -142,16 +142,11 @@ func (p *Peer) Start() error { } if p.ps != nil { - if !p.db.Events().Updates.HasValue() { - return ErrNilUpdateChannel - } - - updateChannel, err := p.db.Events().Updates.Value().Subscribe() + sub, err := p.db.Events().Subscribe(event.UpdateName) if err != nil { return err } - p.updateChannel = updateChannel - + p.updateSub = sub log.InfoContext(p.ctx, "Starting internal broadcaster for pubsub network") go p.handleBroadcastLoop() } @@ -186,22 +181,9 @@ func (p *Peer) Close() { } } stopGRPCServer(p.ctx, p.p2pRPC) - // stopGRPCServer(p.tcpRPC) - // close event emitters - if p.server.pubSubEmitter != nil { - if err := p.server.pubSubEmitter.Close(); err != nil { - log.ErrorContextE(p.ctx, "Could not close pubsub event emitter", err) - } - } - if p.server.pushLogEmitter != nil { - if err := p.server.pushLogEmitter.Close(); err != nil { - log.ErrorContextE(p.ctx, "Could not close push log event emitter", err) - } - } - - if p.db.Events().Updates.HasValue() { - p.db.Events().Updates.Value().Unsubscribe(p.updateChannel) + if p.updateSub != nil { + p.db.Events().Unsubscribe(p.updateSub) } if err := p.bserv.Close(); err != nil { @@ -219,10 +201,14 @@ func (p *Peer) Close() { // from the internal broadcaster to the external pubsub network func (p *Peer) handleBroadcastLoop() { for { - update, isOpen := <-p.updateChannel + msg, isOpen := <-p.updateSub.Message() if !isOpen { return } + update, ok := msg.Data.(event.Update) + if !ok { + continue // ignore invalid value + } var err error if update.IsCreate { @@ -311,7 +297,7 @@ func (p *Peer) pushToReplicator( continue } - evt := events.Update{ + evt := event.Update{ DocID: docIDResult.ID.String(), Cid: c, SchemaRoot: collection.SchemaRoot(), @@ -378,7 +364,7 @@ func (p *Peer) loadP2PCollections(ctx context.Context) (map[string]struct{}, err return colMap, nil } -func (p 
*Peer) handleDocCreateLog(evt events.Update) error { +func (p *Peer) handleDocCreateLog(evt event.Update) error { docID, err := client.NewDocIDFromString(evt.DocID) if err != nil { return NewErrFailedToGetDocID(err) @@ -396,7 +382,7 @@ func (p *Peer) handleDocCreateLog(evt events.Update) error { return nil } -func (p *Peer) handleDocUpdateLog(evt events.Update) error { +func (p *Peer) handleDocUpdateLog(evt event.Update) error { docID, err := client.NewDocIDFromString(evt.DocID) if err != nil { return NewErrFailedToGetDocID(err) @@ -429,7 +415,7 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { return nil } -func (p *Peer) pushLogToReplicators(lg events.Update) { +func (p *Peer) pushLogToReplicators(lg event.Update) { // push to each peer (replicator) peers := make(map[string]struct{}) for _, peer := range p.ps.ListPeers(lg.DocID) { @@ -488,15 +474,6 @@ func stopGRPCServer(ctx context.Context, server *grpc.Server) { } } -type EvtReceivedPushLog struct { - ByPeer peer.ID - FromPeer peer.ID -} - -type EvtPubSub struct { - Peer peer.ID -} - // rollbackAddPubSubTopics removes the given topics from the pubsub system. 
func (p *Peer) rollbackAddPubSubTopics(topics []string, cause error) error { for _, topic := range topics { diff --git a/net/peer_test.go b/net/peer_test.go index eff618dc01..6f6fda67ad 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -32,7 +32,7 @@ import ( acpIdentity "github.com/sourcenetwork/defradb/acp/identity" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/core/crdt" "github.com/sourcenetwork/defradb/internal/db" @@ -75,7 +75,7 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { store := memory.NewDatastore(ctx) acpLocal := acp.NewLocalACP() acpLocal.Init(context.Background(), "") - db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](acpLocal), nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, immutable.Some[acp.ACP](acpLocal), nil) require.NoError(t, err) n, err := NewNode( @@ -91,7 +91,7 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { func TestNewPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) h, err := libp2p.New() @@ -114,7 +114,7 @@ func TestNewPeer_NoDB_NilDBError(t *testing.T) { func TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) _, err = db.AddSchema(ctx, `type User { @@ -164,11 +164,11 @@ func TestStartAndClose_NoError(t *testing.T) { func TestStart_WithKnownPeer_NoError(t *testing.T) { ctx := context.Background() store := 
memory.NewDatastore(ctx) - db1, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db1, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) store2 := memory.NewDatastore(ctx) - db2, err := db.NewDB(ctx, store2, acp.NoACP, nil, db.WithUpdateEvents()) + db2, err := db.NewDB(ctx, store2, acp.NoACP, nil) require.NoError(t, err) n1, err := NewNode( @@ -200,11 +200,11 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) - db1, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) + db1, err := db.NewDB(ctx, store, acp.NoACP, nil) require.NoError(t, err) store2 := memory.NewDatastore(ctx) - db2, err := db.NewDB(ctx, store2, acp.NoACP, nil, db.WithUpdateEvents()) + db2, err := db.NewDB(ctx, store2, acp.NoACP, nil) require.NoError(t, err) n1, err := NewNode( @@ -237,46 +237,6 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { db2.Close() } -func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { - ctx := context.Background() - store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil) - require.NoError(t, err) - - n, err := NewNode( - ctx, - db, - WithEnablePubSub(true), - ) - require.NoError(t, err) - - err = n.Start() - require.ErrorIs(t, err, ErrNilUpdateChannel) - - db.Close() -} - -func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { - ctx := context.Background() - store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, acp.NoACP, nil, db.WithUpdateEvents()) - require.NoError(t, err) - - n, err := NewNode( - ctx, - db, - WithEnablePubSub(true), - ) - require.NoError(t, err) - - db.Events().Updates.Value().Close() - - err = n.Start() - require.ErrorContains(t, err, "cannot subscribe to a closed channel") - - db.Close() -} - func TestRegisterNewDocument_NoError(t *testing.T) { ctx := context.Background() db, n := 
newTestNode(ctx, t) @@ -934,7 +894,7 @@ func TestHandleDocCreateLog_NoError(t *testing.T) { b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) - err = n.handleDocCreateLog(events.Update{ + err = n.handleDocCreateLog(event.Update{ DocID: doc.ID().String(), Cid: headCID, SchemaRoot: col.SchemaRoot(), @@ -948,7 +908,7 @@ func TestHandleDocCreateLog_WithInvalidDocID_NoError(t *testing.T) { _, n := newTestNode(ctx, t) defer n.Close() - err := n.handleDocCreateLog(events.Update{ + err := n.handleDocCreateLog(event.Update{ DocID: "some-invalid-key", }) require.ErrorContains(t, err, "failed to get DocID from broadcast message: selected encoding not supported") @@ -977,7 +937,7 @@ func TestHandleDocCreateLog_WithExistingTopic_TopicExistsError(t *testing.T) { _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.ID().String(), true) require.NoError(t, err) - err = n.handleDocCreateLog(events.Update{ + err = n.handleDocCreateLog(event.Update{ DocID: doc.ID().String(), SchemaRoot: col.SchemaRoot(), }) @@ -1010,7 +970,7 @@ func TestHandleDocUpdateLog_NoError(t *testing.T) { b, err := db.Blockstore().AsIPLDStorage().Get(ctx, headCID.KeyString()) require.NoError(t, err) - err = n.handleDocUpdateLog(events.Update{ + err = n.handleDocUpdateLog(event.Update{ DocID: doc.ID().String(), Cid: headCID, SchemaRoot: col.SchemaRoot(), @@ -1024,7 +984,7 @@ func TestHandleDoUpdateLog_WithInvalidDocID_NoError(t *testing.T) { _, n := newTestNode(ctx, t) defer n.Close() - err := n.handleDocUpdateLog(events.Update{ + err := n.handleDocUpdateLog(event.Update{ DocID: "some-invalid-key", }) require.ErrorContains(t, err, "failed to get DocID from broadcast message: selected encoding not supported") @@ -1059,7 +1019,7 @@ func TestHandleDocUpdateLog_WithExistingDocIDTopic_TopicExistsError(t *testing.T _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.ID().String(), true) require.NoError(t, err) - err = n.handleDocUpdateLog(events.Update{ + err = 
n.handleDocUpdateLog(event.Update{ DocID: doc.ID().String(), Cid: headCID, SchemaRoot: col.SchemaRoot(), @@ -1097,7 +1057,7 @@ func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing. _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), col.SchemaRoot(), true) require.NoError(t, err) - err = n.handleDocUpdateLog(events.Update{ + err = n.handleDocUpdateLog(event.Update{ DocID: doc.ID().String(), Cid: headCID, SchemaRoot: col.SchemaRoot(), diff --git a/net/server.go b/net/server.go index 0eb0d27058..413f391064 100644 --- a/net/server.go +++ b/net/server.go @@ -18,7 +18,6 @@ import ( "sync" cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/corelog" rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" @@ -29,7 +28,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" coreblock "github.com/sourcenetwork/defradb/internal/core/block" pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -48,9 +47,6 @@ type server struct { conns map[libpeer.ID]*grpc.ClientConn - pubSubEmitter event.Emitter - pushLogEmitter event.Emitter - pb.UnimplementedServiceServer } @@ -110,16 +106,6 @@ func newServer(p *Peer, opts ...grpc.DialOption) (*server, error) { } } - var err error - s.pubSubEmitter, err = s.peer.host.EventBus().Emitter(new(EvtPubSub)) - if err != nil { - log.ErrorContextE(s.peer.ctx, "could not create event emitter", err) - } - s.pushLogEmitter, err = s.peer.host.EventBus().Emitter(new(EvtReceivedPushLog)) - if err != nil { - log.ErrorContextE(s.peer.ctx, "could not create event emitter", err) - } - return s, nil } @@ -158,45 +144,26 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if err != nil { return nil, err } + byPeer, err := libpeer.Decode(req.Body.Creator) + if err != nil { + return nil, err + } 
block, err := coreblock.GetFromBytes(req.Body.Log.Block) if err != nil { return nil, err } - - defer func() { - if s.pushLogEmitter != nil { - byPeer, err := libpeer.Decode(req.Body.Creator) - if err != nil { - log.ErrorContextE(ctx, "could not decode the PeerID of the log creator", err) - } - err = s.pushLogEmitter.Emit(EvtReceivedPushLog{ - FromPeer: pid, - ByPeer: byPeer, - }) - if err != nil { - // logging instead of returning an error because the event bus should - // not break the PushLog execution. - log.ErrorContextE(ctx, "could not emit push log event", err) - } - } - }() - err = syncDAG(ctx, s.peer.bserv, block) if err != nil { return nil, err } - if s.peer.db.Events().DAGMerges.HasValue() { - wg := &sync.WaitGroup{} - wg.Add(1) - s.peer.db.Events().DAGMerges.Value().Publish(events.DAGMerge{ - DocID: docID.String(), - Cid: headCID, - SchemaRoot: string(req.Body.SchemaRoot), - Wg: wg, - }) - wg.Wait() - } + s.peer.db.Events().Publish(event.NewMessage(event.MergeName, event.Merge{ + DocID: docID.String(), + ByPeer: byPeer, + FromPeer: pid, + Cid: headCID, + SchemaRoot: string(req.Body.SchemaRoot), + })) // Once processed, subscribe to the DocID topic on the pubsub network unless we already // suscribe to the collection. @@ -343,15 +310,10 @@ func (s *server) pubSubEventHandler(from libpeer.ID, topic string, msg []byte) { corelog.String("Topic", topic), corelog.String("Message", string(msg)), ) - - if s.pubSubEmitter != nil { - err := s.pubSubEmitter.Emit(EvtPubSub{ - Peer: from, - }) - if err != nil { - log.ErrorContextE(s.peer.ctx, "could not emit pubsub event", err) - } - } + evt := event.NewMessage(event.PubSubName, event.PubSub{ + Peer: from, + }) + s.peer.db.Events().Publish(evt) } // addr implements net.Addr and holds a libp2p peer ID. @@ -375,18 +337,3 @@ func peerIDFromContext(ctx context.Context) (libpeer.ID, error) { } return pid, nil } - -// KEEPING AS REFERENCE -// -// logFromProto returns a thread log from a proto log. 
-// func logFromProto(l *pb.Log) thread.LogInfo { -// return thread.LogInfo{ -// ID: l.ID.ID, -// PubKey: l.PubKey.PubKey, -// Addrs: addrsFromProto(l.Addrs), -// Head: thread.Head{ -// ID: l.Head.Cid, -// Counter: l.Counter, -// }, -// } -// } diff --git a/net/sync_dag.go b/net/sync_dag.go index 6e9801ebd7..c15ebb7552 100644 --- a/net/sync_dag.go +++ b/net/sync_dag.go @@ -43,7 +43,10 @@ func syncDAG(ctx context.Context, bserv blockservice.BlockService, block *corebl ctx, cancel := context.WithTimeout(ctx, syncDAGTimeout) defer cancel() + // use a session to make remote fetches more efficient + ctx = blockservice.ContextWithSession(ctx, bserv) store := &bsrvadapter.Adapter{Wrapped: bserv} + lsys := cidlink.DefaultLinkSystem() lsys.SetWriteStorage(store) lsys.SetReadStorage(store) diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index 25e4c177bf..18e306c0f4 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -29,7 +29,7 @@ import ( "github.com/sourcenetwork/defradb/cli" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/net" ) @@ -525,7 +525,7 @@ func (w *Wrapper) Close() { w.node.Close() } -func (w *Wrapper) Events() events.Events { +func (w *Wrapper) Events() *event.Bus { return w.node.Events() } @@ -540,11 +540,3 @@ func (w *Wrapper) PrintDump(ctx context.Context) error { func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) { w.node.Bootstrap(addrs) } - -func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error { - return w.node.WaitForPushLogByPeerEvent(id) -} - -func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error { - return w.node.WaitForPushLogFromPeerEvent(id) -} diff --git a/tests/clients/clients.go b/tests/clients/clients.go index 10df14212f..249b1e767f 100644 --- a/tests/clients/clients.go +++ 
b/tests/clients/clients.go @@ -21,6 +21,4 @@ import ( type Client interface { client.P2P Bootstrap([]peer.AddrInfo) - WaitForPushLogByPeerEvent(peer.ID) error - WaitForPushLogFromPeerEvent(peer.ID) error } diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go index 4727542cce..89b5bce5e7 100644 --- a/tests/clients/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -22,7 +22,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/net" ) @@ -221,7 +221,7 @@ func (w *Wrapper) Close() { w.node.Close() } -func (w *Wrapper) Events() events.Events { +func (w *Wrapper) Events() *event.Bus { return w.node.Events() } @@ -236,11 +236,3 @@ func (w *Wrapper) PrintDump(ctx context.Context) error { func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) { w.node.Bootstrap(addrs) } - -func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error { - return w.node.WaitForPushLogByPeerEvent(id) -} - -func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error { - return w.node.WaitForPushLogFromPeerEvent(id) -} diff --git a/tests/integration/db.go b/tests/integration/db.go index 1e8fe82731..ab15e2d5fc 100644 --- a/tests/integration/db.go +++ b/tests/integration/db.go @@ -19,7 +19,6 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/crypto" - "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/node" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" ) @@ -73,7 +72,6 @@ func init() { func NewBadgerMemoryDB(ctx context.Context) (client.DB, error) { opts := []node.Option{ node.WithInMemory(true), - db.WithUpdateEvents(), } node, err := node.NewNode(ctx, opts...) 
@@ -104,8 +102,6 @@ func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, error) { // select the datastore implementation to use. func setupDatabase(s *state) (client.DB, string, error) { opts := []node.Option{ - db.WithUpdateEvents(), - db.WithDAGMergeEvents(), node.WithLensPoolSize(lensPoolSize), // The test framework sets this up elsewhere when required so that it may be wrapped // into a [client.DB]. diff --git a/tests/integration/events/utils.go b/tests/integration/events/utils.go index eb514bce2b..4988f25a66 100644 --- a/tests/integration/events/utils.go +++ b/tests/integration/events/utils.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/event" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -79,14 +80,20 @@ func ExecuteRequestTestCase( testRoutineClosedChan := make(chan struct{}) closeTestRoutineChan := make(chan struct{}) - eventsChan, err := db.Events().Updates.Value().Subscribe() + + eventsSub, err := db.Events().Subscribe(event.UpdateName) require.NoError(t, err) indexOfNextExpectedUpdate := 0 go func() { for { select { - case update := <-eventsChan: + case value := <-eventsSub.Message(): + update, ok := value.Data.(event.Update) + if !ok { + continue // ignore invalid value + } + if indexOfNextExpectedUpdate >= len(testCase.ExpectedUpdates) { assert.Fail(t, "More events recieved than were expected", update) testRoutineClosedChan <- struct{}{} diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go index 0cace429ae..d990a3d322 100644 --- a/tests/integration/p2p.go +++ b/tests/integration/p2p.go @@ -14,8 +14,8 @@ import ( "time" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/net" - "github.com/sourcenetwork/defradb/tests/clients" "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/corelog" @@ -162,22 +162,17 @@ func connectPeers( // 
allowed to complete before documentation begins or it will not even try and sync it. So for now, we // sleep a little. time.Sleep(100 * time.Millisecond) - setupPeerWaitSync(s, 0, cfg, sourceNode, targetNode) + setupPeerWaitSync(s, 0, cfg) } func setupPeerWaitSync( s *state, startIndex int, cfg ConnectPeers, - sourceNode clients.Client, - targetNode clients.Client, ) { sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} - sourcePeerInfo := sourceNode.PeerInfo() - targetPeerInfo := targetNode.PeerInfo() - nodeCollections := map[int][]int{} waitIndex := 0 for i := startIndex; i < len(s.testCase.Actions); i++ { @@ -258,24 +253,7 @@ func setupPeerWaitSync( } nodeSynced := make(chan struct{}) - ready := make(chan struct{}) - go func(ready chan struct{}) { - ready <- struct{}{} - for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { - for i := 0; i < targetToSourceEvents[waitIndex]; i++ { - err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID) - require.NoError(s.t, err) - } - for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { - err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID) - require.NoError(s.t, err) - } - nodeSynced <- struct{}{} - } - }(ready) - // Ensure that the wait routine is ready to receive events before we continue. 
- <-ready - + go waitForMerge(s, cfg.SourceNodeID, cfg.TargetNodeID, sourceToTargetEvents, targetToSourceEvents, nodeSynced) s.syncChans = append(s.syncChans, nodeSynced) } @@ -317,7 +295,7 @@ func configureReplicator( expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, cfg.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, cfg.ExpectedError, expectedErrorRaised) if err == nil { - setupReplicatorWaitSync(s, 0, cfg, sourceNode, targetNode) + setupReplicatorWaitSync(s, 0, cfg) } } @@ -338,15 +316,10 @@ func setupReplicatorWaitSync( s *state, startIndex int, cfg ConfigureReplicator, - sourceNode clients.Client, - targetNode clients.Client, ) { sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} - sourcePeerInfo := sourceNode.PeerInfo() - targetPeerInfo := targetNode.PeerInfo() - docIDsSyncedToSource := map[int]struct{}{} waitIndex := 0 currentDocID := 0 @@ -393,24 +366,7 @@ func setupReplicatorWaitSync( } nodeSynced := make(chan struct{}) - ready := make(chan struct{}) - go func(ready chan struct{}) { - ready <- struct{}{} - for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { - for i := 0; i < targetToSourceEvents[waitIndex]; i++ { - err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID) - require.NoError(s.t, err) - } - for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { - err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID) - require.NoError(s.t, err) - } - nodeSynced <- struct{}{} - } - }(ready) - // Ensure that the wait routine is ready to receive events before we continue. - <-ready - + go waitForMerge(s, cfg.SourceNodeID, cfg.TargetNodeID, sourceToTargetEvents, targetToSourceEvents, nodeSynced) s.syncChans = append(s.syncChans, nodeSynced) } @@ -532,6 +488,46 @@ func waitForSync( } } +// waitForMerge waits for the source and target nodes to synchronize their state +// by listening to merge events sent from the network subsystem on the event bus. 
+// +// sourceToTargetEvents and targetToSourceEvents are slices containing the number +// of expected merge events to be received after each test action has executed. +func waitForMerge( + s *state, + sourceNodeID int, + targetNodeID int, + sourceToTargetEvents []int, + targetToSourceEvents []int, + nodeSynced chan struct{}, +) { + sourceSub := s.eventSubs[sourceNodeID] + targetSub := s.eventSubs[targetNodeID] + + sourcePeerInfo := s.nodeAddresses[sourceNodeID] + targetPeerInfo := s.nodeAddresses[targetNodeID] + + for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { + for i := 0; i < targetToSourceEvents[waitIndex]; i++ { + // wait for message or unsubscribe + msg, ok := <-sourceSub.Message() + if ok { + // ensure the message is sent from the target node + require.Equal(s.t, targetPeerInfo.ID, msg.Data.(event.Merge).ByPeer) + } + } + for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { + // wait for message or unsubscribe + msg, ok := <-targetSub.Message() + if ok { + // ensure the message is sent from the source node + require.Equal(s.t, sourcePeerInfo.ID, msg.Data.(event.Merge).ByPeer) + } + } + nodeSynced <- struct{}{} + } +} + func RandomNetworkingConfig() ConfigureNode { return func() []net.NodeOpt { return []net.NodeOpt{ diff --git a/tests/integration/state.go b/tests/integration/state.go index c92e3710ba..47affa8160 100644 --- a/tests/integration/state.go +++ b/tests/integration/state.go @@ -18,6 +18,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/net" "github.com/sourcenetwork/defradb/tests/clients" ) @@ -52,6 +53,9 @@ type state struct { // These synchronisation channels allow async actions to track their completion. syncChans []chan struct{} + // eventSubs is a list of all event subscriptions + eventSubs []*event.Subscription + // The addresses of any nodes configured. 
nodeAddresses []peer.AddrInfo @@ -104,6 +108,7 @@ func newState( allActionsDone: make(chan struct{}), subscriptionResultsChans: []chan func(){}, syncChans: []chan struct{}{}, + eventSubs: []*event.Subscription{}, nodeAddresses: []peer.AddrInfo{}, nodeConfigs: [][]net.NodeOpt{}, nodes: []clients.Client{}, diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index f7bceebd19..5e9d089ccd 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -32,6 +32,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/db" "github.com/sourcenetwork/defradb/internal/request/graphql" "github.com/sourcenetwork/defradb/net" @@ -707,6 +708,11 @@ func restartNodes( c, err := setupClient(s, n) require.NoError(s.t, err) s.nodes[i] = c + + // subscribe to merge complete events + sub, err := c.Events().Subscribe(event.MergeCompleteName) + require.NoError(s.t, err) + s.eventSubs[i] = sub } // The index of the action after the last wait action before the current restart action. @@ -728,15 +734,11 @@ actionLoop: case ConnectPeers: // Give the nodes a chance to connect to each other and learn about each other's subscribed topics. time.Sleep(100 * time.Millisecond) - setupPeerWaitSync( - s, waitGroupStartIndex, action, s.nodes[action.SourceNodeID], s.nodes[action.TargetNodeID], - ) + setupPeerWaitSync(s, waitGroupStartIndex, action) case ConfigureReplicator: // Give the nodes a chance to connect to each other and learn about each other's subscribed topics. 
time.Sleep(100 * time.Millisecond) - setupReplicatorWaitSync( - s, waitGroupStartIndex, action, s.nodes[action.SourceNodeID], s.nodes[action.TargetNodeID], - ) + setupReplicatorWaitSync(s, waitGroupStartIndex, action) } } @@ -812,6 +814,11 @@ func configureNode( s.nodes = append(s.nodes, c) s.dbPaths = append(s.dbPaths, path) + + // subscribe to merge complete events + sub, err := c.Events().Subscribe(event.MergeCompleteName) + require.NoError(s.t, err) + s.eventSubs = append(s.eventSubs, sub) } func refreshDocuments( From c1b612ea28b866093da2d7d1f473ea9de1072a93 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Thu, 20 Jun 2024 10:32:34 -0700 Subject: [PATCH 59/78] docs(i): Update contributors (#2743) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index db0a3598b7..be76a151ff 100644 --- a/README.md +++ b/README.md @@ -479,5 +479,6 @@ DefraDB's code is released under the [Business Source License (BSL)](licenses/BS - Orpheus Lummis ([@orpheuslummis](https://github.com/orpheuslummis)) - Fred Carle ([@fredcarle](https://github.com/fredcarle)) - Islam Aliev ([@islamaliev](https://github.com/islamaliev)) +- Keenan Nemetz ([@nasdf](https://github.com/nasdf)) You are invited to contribute to DefraDB. Follow the [Contributing guide](./CONTRIBUTING.md) to get started. From 77af4fc83dbebd7bbd3b2908e42fe3c54f1e1b13 Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Thu, 20 Jun 2024 22:42:51 -0400 Subject: [PATCH 60/78] fix: Use node representation for Block (#2746) ## Relevant issue(s) Resolves #2745 ## Description This PR ensures that the node being passed to the link system is in it's representation form. This will allow the use of optional fields. Node that the 2 added `block` unit tests are there to cover when the optional `isEncrypted` field will be added to `Block` ipld schema. 
--- .../i2746-use-node-representation.md | 2 + internal/core/block/block.go | 15 ++-- internal/core/block/block_test.go | 64 ++++++++++++++-- internal/db/merge_test.go | 2 +- net/sync_dag.go | 2 +- .../events/simple/with_update_test.go | 4 +- .../mutation/create/with_version_test.go | 2 +- .../integration/query/commits/simple_test.go | 44 +++++------ .../query/commits/with_cid_test.go | 8 +- .../query/commits/with_depth_test.go | 34 ++++----- .../query/commits/with_doc_id_cid_test.go | 4 +- .../query/commits/with_doc_id_count_test.go | 6 +- .../query/commits/with_doc_id_field_test.go | 4 +- .../commits/with_doc_id_limit_offset_test.go | 4 +- .../query/commits/with_doc_id_limit_test.go | 4 +- .../with_doc_id_order_limit_offset_test.go | 4 +- .../query/commits/with_doc_id_order_test.go | 74 +++++++++---------- .../query/commits/with_doc_id_test.go | 46 ++++++------ .../commits/with_doc_id_typename_test.go | 6 +- .../query/commits/with_field_test.go | 6 +- .../query/commits/with_group_test.go | 16 ++-- .../latest_commits/with_doc_id_field_test.go | 8 +- .../query/latest_commits/with_doc_id_test.go | 8 +- .../query/one_to_many/with_cid_doc_id_test.go | 8 +- .../query/simple/with_cid_doc_id_test.go | 18 ++--- .../query/simple/with_version_test.go | 30 ++++---- 26 files changed, 240 insertions(+), 183 deletions(-) create mode 100644 docs/data_format_changes/i2746-use-node-representation.md diff --git a/docs/data_format_changes/i2746-use-node-representation.md b/docs/data_format_changes/i2746-use-node-representation.md new file mode 100644 index 0000000000..c701aed809 --- /dev/null +++ b/docs/data_format_changes/i2746-use-node-representation.md @@ -0,0 +1,2 @@ +# Use node representation +To enable the use of optional IPLD schema fields, we change to using the node representation when saving with the link system. 
\ No newline at end of file diff --git a/internal/core/block/block.go b/internal/core/block/block.go index c9a3f629c2..8482a23d91 100644 --- a/internal/core/block/block.go +++ b/internal/core/block/block.go @@ -30,7 +30,7 @@ import ( // Schema is the IPLD schema type that represents a `Block`. var ( Schema schema.Type - SchemaPrototype schema.TypedPrototype + SchemaPrototype ipld.NodePrototype ) func init() { @@ -49,7 +49,7 @@ type schemaDefinition interface { IPLDSchemaBytes() []byte } -func mustSetSchema(schemas ...schemaDefinition) (schema.Type, schema.TypedPrototype) { +func mustSetSchema(schemas ...schemaDefinition) (schema.Type, ipld.NodePrototype) { schemaBytes := make([][]byte, 0, len(schemas)) for _, s := range schemas { schemaBytes = append(schemaBytes, s.IPLDSchemaBytes()) @@ -66,7 +66,7 @@ func mustSetSchema(schemas ...schemaDefinition) (schema.Type, schema.TypedProtot // If [Block] and `blockSchematype` do not match, this will panic. proto := bindnode.Prototype(&Block{}, blockSchemaType) - return blockSchemaType, proto + return blockSchemaType, proto.Representation() } // DAGLink represents a link to another object in a DAG. @@ -201,9 +201,9 @@ func (block *Block) Unmarshal(b []byte) error { return nil } -// GenerateNode generates an IPLD node from the block. +// GenerateNode generates an IPLD node from the block in its representation form. func (block *Block) GenerateNode() (node ipld.Node) { - return bindnode.Wrap(block, Schema) + return bindnode.Wrap(block, Schema).Representation() } // GetLinkByName returns the link by name. It will return false if the link does not exist. @@ -219,11 +219,14 @@ func (block *Block) GetLinkByName(name string) (cidlink.Link, bool) { // GenerateLink generates a cid link for the block. func (block *Block) GenerateLink() (cidlink.Link, error) { node := bindnode.Wrap(block, Schema) - return GetLinkFromNode(node) + return GetLinkFromNode(node.Representation()) } // GetLinkFromNode returns the cid link from the node. 
func GetLinkFromNode(node ipld.Node) (cidlink.Link, error) { + if typedNode, ok := node.(schema.TypedNode); ok { + node = typedNode.Representation() + } lsys := cidlink.DefaultLinkSystem() link, err := lsys.ComputeLink(GetLinkPrototype(), node) if err != nil { diff --git a/internal/core/block/block_test.go b/internal/core/block/block_test.go index 75a6ce1780..5b68cf9067 100644 --- a/internal/core/block/block_test.go +++ b/internal/core/block/block_test.go @@ -17,6 +17,7 @@ import ( "github.com/ipld/go-ipld-prime/linking" cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/node/bindnode" "github.com/ipld/go-ipld-prime/storage/memstore" "github.com/stretchr/testify/require" @@ -107,7 +108,11 @@ func generateBlocks(lsys *linking.LinkSystem) (cidlink.Link, error) { }, }, } - compositeUpdateBlockLink, err := lsys.Store(ipld.LinkContext{}, GetLinkPrototype(), compositeUpdateBlock.GenerateNode()) + compositeUpdateBlockLink, err := lsys.Store( + ipld.LinkContext{}, + GetLinkPrototype(), + compositeUpdateBlock.GenerateNode(), + ) if err != nil { return cidlink.Link{}, err } @@ -124,8 +129,7 @@ func TestBlock(t *testing.T) { rootLink, err := generateBlocks(&lsys) require.NoError(t, err) - proto := SchemaPrototype.Representation() - nd, err := lsys.Load(ipld.LinkContext{}, rootLink, proto) + nd, err := lsys.Load(ipld.LinkContext{}, rootLink, SchemaPrototype) require.NoError(t, err) block, err := GetFromNode(nd) @@ -139,7 +143,7 @@ func TestBlock(t *testing.T) { require.Equal(t, block, newBlock) - newNode := block.GenerateNode() + newNode := bindnode.Wrap(block, Schema) require.Equal(t, nd, newNode) link, err := block.GenerateLink() @@ -165,8 +169,7 @@ func TestBlockDeltaPriority(t *testing.T) { rootLink, err := generateBlocks(&lsys) require.NoError(t, err) - proto := SchemaPrototype.Representation() - nd, err := lsys.Load(ipld.LinkContext{}, rootLink, proto) + nd, err := 
lsys.Load(ipld.LinkContext{}, rootLink, SchemaPrototype) require.NoError(t, err) block, err := GetFromNode(nd) @@ -176,3 +179,52 @@ func TestBlockDeltaPriority(t *testing.T) { // which results in a priority of 2. require.Equal(t, uint64(2), block.Delta.GetPriority()) } + +func TestBlockMarshal_IsEncryptedNotSet_ShouldNotContainIsEcryptedField(t *testing.T) { + lsys := cidlink.DefaultLinkSystem() + store := memstore.Store{} + lsys.SetReadStorage(&store) + lsys.SetWriteStorage(&store) + + fieldBlock := Block{ + Delta: crdt.CRDT{ + LWWRegDelta: &crdt.LWWRegDelta{ + DocID: []byte("docID"), + FieldName: "name", + Priority: 1, + SchemaVersionID: "schemaVersionID", + Data: []byte("John"), + }, + }, + } + + b, err := fieldBlock.Marshal() + require.NoError(t, err) + require.NotContains(t, string(b), "isEncrypted") +} + +func TestBlockMarshal_IsEncryptedNotSetWithLinkSystem_ShouldLoadWithNoError(t *testing.T) { + lsys := cidlink.DefaultLinkSystem() + store := memstore.Store{} + lsys.SetReadStorage(&store) + lsys.SetWriteStorage(&store) + + fieldBlock := Block{ + Delta: crdt.CRDT{ + LWWRegDelta: &crdt.LWWRegDelta{ + DocID: []byte("docID"), + FieldName: "name", + Priority: 1, + SchemaVersionID: "schemaVersionID", + Data: []byte("John"), + }, + }, + } + fieldBlockLink, err := lsys.Store(ipld.LinkContext{}, GetLinkPrototype(), fieldBlock.GenerateNode()) + require.NoError(t, err) + + nd, err := lsys.Load(ipld.LinkContext{}, fieldBlockLink, SchemaPrototype) + require.NoError(t, err) + _, err = GetFromNode(nd) + require.NoError(t, err) +} diff --git a/internal/db/merge_test.go b/internal/db/merge_test.go index 0c9a06ac42..f620003bbe 100644 --- a/internal/db/merge_test.go +++ b/internal/db/merge_test.go @@ -186,7 +186,7 @@ func TestMerge_DualBranchWithOneIncomplete_CouldNotFindCID(t *testing.T) { Cid: compInfo3.link.Cid, SchemaRoot: col.SchemaRoot(), }) - require.ErrorContains(t, err, "could not find bafyreichk7jctbxhrodk5au3r4c4iqm627d4fi2cii2beseu4h6caoiwla") + 
require.ErrorContains(t, err, "could not find bafyreifi4sa4auy4uk6psoljwuzqepgwqzsjk3h6p2xjdtsty7bdjz4uzm") // Verify the document was created with the expected values doc, err := col.Get(ctx, docID, false) diff --git a/net/sync_dag.go b/net/sync_dag.go index c15ebb7552..d856f36b2a 100644 --- a/net/sync_dag.go +++ b/net/sync_dag.go @@ -78,7 +78,7 @@ func syncDAG(ctx context.Context, bserv blockservice.BlockService, block *corebl // // any errors encountered during preload are ignored preloader := func(pctx preload.PreloadContext, l preload.Link) { - go lsys.Load(linking.LinkContext{Ctx: pctx.Ctx}, l.Link, basicnode.Prototype.Any) //nolint:errcheck + go lsys.Load(linking.LinkContext{Ctx: pctx.Ctx}, l.Link, coreblock.SchemaPrototype) //nolint:errcheck } config := traversal.Config{ Ctx: ctx, diff --git a/tests/integration/events/simple/with_update_test.go b/tests/integration/events/simple/with_update_test.go index 689734dc73..0b49486aa4 100644 --- a/tests/integration/events/simple/with_update_test.go +++ b/tests/integration/events/simple/with_update_test.go @@ -66,14 +66,14 @@ func TestEventsSimpleWithUpdate(t *testing.T) { ExpectedUpdates: []testUtils.ExpectedUpdate{ { DocID: immutable.Some(docID1), - Cid: immutable.Some("bafyreifvrmwmlwtglxe3afki36spu6d5qs6vvza57kxs4giyi53r5vbbnu"), + Cid: immutable.Some("bafyreih5kmftjua6lihlm7lwohamezecomnwgxv6jtowfnrrfdev43lquq"), }, { DocID: immutable.Some(docID2), }, { DocID: immutable.Some(docID1), - Cid: immutable.Some("bafyreihfijpchdbc6fb3klay3a2ktcwav7mse6udbxpauslwzsmn6qczna"), + Cid: immutable.Some("bafyreifzav4o7q4sljthu2vks3idyd67hg34llnyv44ii6pstal2woc65q"), }, }, } diff --git a/tests/integration/mutation/create/with_version_test.go b/tests/integration/mutation/create/with_version_test.go index ea44ba6038..4ade869073 100644 --- a/tests/integration/mutation/create/with_version_test.go +++ b/tests/integration/mutation/create/with_version_test.go @@ -39,7 +39,7 @@ func TestMutationCreate_ReturnsVersionCID(t *testing.T) { 
{ "_version": []map[string]any{ { - "cid": "bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", + "cid": "bafyreia5ph2hvwebdsxe7m2f6bwuq7ngwxzqp7esiuzjihtcz2jswma6xu", }, }, }, diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go index bcd7ccc1c0..dfb0fcc0ad 100644 --- a/tests/integration/query/commits/simple_test.go +++ b/tests/integration/query/commits/simple_test.go @@ -36,13 +36,13 @@ func TestQueryCommits(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, @@ -79,22 +79,22 @@ func TestQueryCommitsMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreiazgtllwk7znzuapv3fsukzhpekqqjjvgv4fzypkfp7mljfabie3q", + "cid": "bafyreihpasbgxcoxmzv5bp6euq3lbaoh5y5wjbbgfthtxqs3nppk36kebq", }, { - "cid": "bafyreicbr2jo7y4d6773q66kxvzq4k3jss2rw5ysr3co2mjdhcdyiz7buq", + "cid": "bafyreihe3jydldbt7mvkiae6asrchdxajzkxwid6syi436nmrpcqhwt7xa", }, { - "cid": "bafyreihmvuytwy5ofcm5bqyazxwnquksutxvybznavmw23vddb7nooh6pq", + "cid": "bafyreihb5eo3luqoojztdmxtg3tdpvm6pc64mkyrzlefbdauker5qlnop4", }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, @@ -125,15 +125,15 
@@ func TestQueryCommitsWithSchemaVersionIDField(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, @@ -349,7 +349,7 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { `, Results: []map[string]any{ { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "collectionID": int64(1), "delta": testUtils.CBORValue(22), "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", @@ -358,13 +358,13 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "height": int64(2), "links": []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "name": "_head", }, }, }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "collectionID": int64(1), "delta": testUtils.CBORValue(21), "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", @@ -374,7 +374,7 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "links": []map[string]any{}, }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": 
"bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "collectionID": int64(1), "delta": testUtils.CBORValue("John"), "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", @@ -384,7 +384,7 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "links": []map[string]any{}, }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "collectionID": int64(1), "delta": nil, "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", @@ -393,17 +393,17 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "height": int64(2), "links": []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "name": "_head", }, { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "name": "age", }, }, }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "collectionID": int64(1), "delta": nil, "docID": "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", @@ -412,11 +412,11 @@ func TestQuery_CommitsWithAllFieldsWithUpdate_NoError(t *testing.T) { "height": int64(1), "links": []map[string]any{ { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "name": "name", }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "name": "age", }, }, diff --git a/tests/integration/query/commits/with_cid_test.go b/tests/integration/query/commits/with_cid_test.go index 6a85691e74..443577ac28 100644 --- a/tests/integration/query/commits/with_cid_test.go +++ b/tests/integration/query/commits/with_cid_test.go @@ -38,14 
+38,14 @@ func TestQueryCommitsWithCid(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq" + cid: "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, @@ -71,14 +71,14 @@ func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq" + cid: "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, diff --git a/tests/integration/query/commits/with_depth_test.go b/tests/integration/query/commits/with_depth_test.go index 7660a04585..a55828115f 100644 --- a/tests/integration/query/commits/with_depth_test.go +++ b/tests/integration/query/commits/with_depth_test.go @@ -36,13 +36,13 @@ func TestQueryCommitsWithDepth1(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, @@ -81,16 +81,16 @@ func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { Results: []map[string]any{ { // "Age" field head - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "height": 
int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "height": int64(2), }, }, @@ -137,27 +137,27 @@ func TestQueryCommitsWithDepth2WithUpdate(t *testing.T) { Results: []map[string]any{ { // Composite head - "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", + "cid": "bafyreibpiyrugj4gku336wp5lvcw3fgyxqpjvugm3t4z7v5h3ulwxs3x2y", "height": int64(3), }, { // Composite head -1 - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { // "Age" field head - "cid": "bafyreichbcwfiwampbb2w2stlrk5yryu5ao4ubz2utybv5fc6qioj3qhu4", + "cid": "bafyreieydjk3sqrxs5aqhsiy7ct25vu5qtbtpmzbytzee4apeidx6dq7je", "height": int64(3), }, { // "Age" field head -1 - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "height": int64(2), }, }, @@ -195,22 +195,22 @@ func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreibqku2yhbclue774qtkh2gmn3oeaw7gan7ervjyqwqsxfr2c7ymwu", + "cid": "bafyreibzfxudkhrcsz7lsgtb637gzyegsdkehlugvb2dg76smhhnkg46dm", }, { - "cid": "bafyreicrku377qakqjzzlh4phornkj36otic6oc27ano6mril5hu5fuulu", + "cid": "bafyreiabiarng2rcvkfgoirnnyy3yvd7yi3c66akovkbmhivrxvdawtcna", }, { - "cid": "bafyreibqvl2lwj3icgbg7rconymk5pmnpzr6htvd37qw4fmawe7bxvpuse", + "cid": 
"bafyreibubqh6ltxbxmtrtd5oczaekcfw5knqfyocnwkdwhpjatl7johoue", }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_cid_test.go b/tests/integration/query/commits/with_doc_id_cid_test.go index 493f07b477..7cbdc10307 100644 --- a/tests/integration/query/commits/with_doc_id_cid_test.go +++ b/tests/integration/query/commits/with_doc_id_cid_test.go @@ -104,14 +104,14 @@ func TestQueryCommitsWithDocIDAndCidWithUpdate(t *testing.T) { Request: ` { commits( docID: "bae-c9fb0fa4-1195-589c-aa54-e68333fb90b3", - cid: "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba" + cid: "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_count_test.go b/tests/integration/query/commits/with_doc_id_count_test.go index 11034ac09f..d3a7ca0fbb 100644 --- a/tests/integration/query/commits/with_doc_id_count_test.go +++ b/tests/integration/query/commits/with_doc_id_count_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDocIDAndLinkCount(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "_count": 0, }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": 
"bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "_count": 0, }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "_count": 2, }, }, diff --git a/tests/integration/query/commits/with_doc_id_field_test.go b/tests/integration/query/commits/with_doc_id_field_test.go index 702f4cc93b..84821938a0 100644 --- a/tests/integration/query/commits/with_doc_id_field_test.go +++ b/tests/integration/query/commits/with_doc_id_field_test.go @@ -118,7 +118,7 @@ func TestQueryCommitsWithDocIDAndFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, }, }, @@ -150,7 +150,7 @@ func TestQueryCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_limit_offset_test.go index 42779293c0..68cca30bdc 100644 --- a/tests/integration/query/commits/with_doc_id_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_offset_test.go @@ -57,10 +57,10 @@ func TestQueryCommitsWithDocIDAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", + "cid": "bafyreibpiyrugj4gku336wp5lvcw3fgyxqpjvugm3t4z7v5h3ulwxs3x2y", }, { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_limit_test.go b/tests/integration/query/commits/with_doc_id_limit_test.go index 6be85f5339..090417fc64 100644 --- 
a/tests/integration/query/commits/with_doc_id_limit_test.go +++ b/tests/integration/query/commits/with_doc_id_limit_test.go @@ -50,10 +50,10 @@ func TestQueryCommitsWithDocIDAndLimit(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", + "cid": "bafyreibpiyrugj4gku336wp5lvcw3fgyxqpjvugm3t4z7v5h3ulwxs3x2y", }, { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", }, }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go index c1685e7ad4..4260940370 100644 --- a/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go +++ b/tests/integration/query/commits/with_doc_id_order_limit_offset_test.go @@ -58,11 +58,11 @@ func TestQueryCommitsWithDocIDAndOrderAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "height": int64(2), }, { - "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", + "cid": "bafyreibpiyrugj4gku336wp5lvcw3fgyxqpjvugm3t4z7v5h3ulwxs3x2y", "height": int64(3), }, }, diff --git a/tests/integration/query/commits/with_doc_id_order_test.go b/tests/integration/query/commits/with_doc_id_order_test.go index f7fb045a1f..922c87dc7b 100644 --- a/tests/integration/query/commits/with_doc_id_order_test.go +++ b/tests/integration/query/commits/with_doc_id_order_test.go @@ -44,23 +44,23 @@ func TestQueryCommitsWithDocIDAndOrderHeightDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "height": int64(2), }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", 
+ "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "height": int64(2), }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "height": int64(1), }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "height": int64(1), }, }, @@ -99,23 +99,23 @@ func TestQueryCommitsWithDocIDAndOrderHeightAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "height": int64(1), }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "height": int64(1), }, { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "height": int64(2), }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "height": int64(2), }, }, @@ -154,24 +154,24 @@ func TestQueryCommitsWithDocIDAndOrderCidDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", - "height": int64(2), + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", + "height": int64(1), }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", - "height": int64(2), + "cid": 
"bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", + "height": int64(1), }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", - "height": int64(1), + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", + "height": int64(2), }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", - "height": int64(1), + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", + "height": int64(2), }, }, }, @@ -209,24 +209,24 @@ func TestQueryCommitsWithDocIDAndOrderCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", - "height": int64(1), + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", + "height": int64(2), }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", - "height": int64(1), + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", + "height": int64(2), }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", - "height": int64(2), + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", + "height": int64(1), }, { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", - "height": int64(2), + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", + "height": int64(1), }, }, }, @@ -278,39 +278,39 @@ func TestQueryCommitsWithDocIDAndOrderAndMultiUpdatesCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "height": 
int64(1), }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "height": int64(1), }, { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "height": int64(2), }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "height": int64(2), }, { - "cid": "bafyreicoci4ah2uft5giiyl2lfg4jgcegwbvt3mbllnqnmfh3oy24usxsy", + "cid": "bafyreibpiyrugj4gku336wp5lvcw3fgyxqpjvugm3t4z7v5h3ulwxs3x2y", "height": int64(3), }, { - "cid": "bafyreichbcwfiwampbb2w2stlrk5yryu5ao4ubz2utybv5fc6qioj3qhu4", + "cid": "bafyreieydjk3sqrxs5aqhsiy7ct25vu5qtbtpmzbytzee4apeidx6dq7je", "height": int64(3), }, { - "cid": "bafyreiefwtmw7gtwjmvhapfpq2gmi6j772a6zx5uyyrys6ft4za4oljwfm", + "cid": "bafyreic6rjkn7qsoxpboviode2l64ahg4yajsrb3p25zeooisnaxcweccu", "height": int64(4), }, { - "cid": "bafyreidde6teqyfjruflxo3yy25rayu7yrxg54siqvloxzqt7o32g2wicy", + "cid": "bafyreieifkfzufdvlvni4o5pbdtuvm3w6x4fnqyelyq2owvsliiwjvddpi", "height": int64(4), }, }, diff --git a/tests/integration/query/commits/with_doc_id_test.go b/tests/integration/query/commits/with_doc_id_test.go index e082e6e790..c716cd296e 100644 --- a/tests/integration/query/commits/with_doc_id_test.go +++ b/tests/integration/query/commits/with_doc_id_test.go @@ -62,13 +62,13 @@ func TestQueryCommitsWithDocID(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": 
"bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, @@ -102,22 +102,22 @@ func TestQueryCommitsWithDocIDAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "links": []map[string]any{}, }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "links": []map[string]any{}, }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "links": []map[string]any{ { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "name": "name", }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "name": "age", }, }, @@ -158,23 +158,23 @@ func TestQueryCommitsWithDocIDAndUpdate(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "height": int64(2), }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "height": int64(1), }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "height": int64(1), }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "height": int64(2), }, { - "cid": 
"bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "height": int64(1), }, }, @@ -219,44 +219,44 @@ func TestQueryCommitsWithDocIDAndUpdateAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "links": []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "name": "_head", }, }, }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "links": []map[string]any{}, }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "links": []map[string]any{}, }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", "links": []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "name": "_head", }, { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", "name": "age", }, }, }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "links": []map[string]any{ { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "name": "name", }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "name": "age", }, }, diff 
--git a/tests/integration/query/commits/with_doc_id_typename_test.go b/tests/integration/query/commits/with_doc_id_typename_test.go index 77a58d2fdc..a26580d113 100644 --- a/tests/integration/query/commits/with_doc_id_typename_test.go +++ b/tests/integration/query/commits/with_doc_id_typename_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDocIDWithTypeName(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "__typename": "Commit", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "__typename": "Commit", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "__typename": "Commit", }, }, diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index 15cc2a4cd9..48c5225ca5 100644 --- a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -66,7 +66,7 @@ func TestQueryCommitsWithFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, }, }, @@ -98,7 +98,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, @@ -131,7 +131,7 @@ func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionID(t *testing. 
}`, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, diff --git a/tests/integration/query/commits/with_group_test.go b/tests/integration/query/commits/with_group_test.go index fb958ad077..5405ea49c8 100644 --- a/tests/integration/query/commits/with_group_test.go +++ b/tests/integration/query/commits/with_group_test.go @@ -89,10 +89,10 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(2), "_group": []map[string]any{ { - "cid": "bafyreigurfgpfvcm4uzqxjf4ur3xegxbebn6yoogjrvyaw6x7d2ji6igim", + "cid": "bafyreiay56ley5dvsptso37fsonfcrtbuphwlfhi67d2y52vzzexba6vua", }, { - "cid": "bafyreif632ewkphjjwxcthemgbkgtm25faw22mvw7eienu5gnazrao33ba", + "cid": "bafyreicsavx5oblk6asfoqyssz4ge2gf5ekfouvi7o6l7adly275op5oje", }, }, }, @@ -100,13 +100,13 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(1), "_group": []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", }, { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", }, }, }, @@ -142,7 +142,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "_group": []map[string]any{ { "height": int64(1), @@ -150,7 +150,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": 
"bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "_group": []map[string]any{ { "height": int64(1), @@ -158,7 +158,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "_group": []map[string]any{ { "height": int64(1), diff --git a/tests/integration/query/latest_commits/with_doc_id_field_test.go b/tests/integration/query/latest_commits/with_doc_id_field_test.go index 300aabbc46..04f0065b0a 100644 --- a/tests/integration/query/latest_commits/with_doc_id_field_test.go +++ b/tests/integration/query/latest_commits/with_doc_id_field_test.go @@ -68,7 +68,7 @@ func TestQueryLatestCommitsWithDocIDAndFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "links": []map[string]any{}, }, }, @@ -101,14 +101,14 @@ func TestQueryLatestCommitsWithDocIDAndCompositeFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "links": []map[string]any{ { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "name": "name", }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "name": "age", }, }, diff --git a/tests/integration/query/latest_commits/with_doc_id_test.go b/tests/integration/query/latest_commits/with_doc_id_test.go index 1eea07f6de..d39becb3d3 100644 --- a/tests/integration/query/latest_commits/with_doc_id_test.go +++ 
b/tests/integration/query/latest_commits/with_doc_id_test.go @@ -38,14 +38,14 @@ func TestQueryLatestCommitsWithDocID(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "links": []map[string]any{ { - "cid": "bafyreidksmyoo6txzmcygby6quhdkzymlqoaxpg75ehlxjdneotjzbih6y", + "cid": "bafyreic2sba5sffkfnt32wfeoaw4qsqozjb5acwwtouxuzllb3aymjwute", "name": "name", }, { - "cid": "bafyreietqxguz3xlady4gfaqnbeamwsnrwfkufykkpprxej7a77ba7siay", + "cid": "bafyreifzyy7bmpx2eywj4lznxzrzrvh6vrz6l7bhthkpexdq3wtho3vz6i", "name": "age", }, }, @@ -75,7 +75,7 @@ func TestQueryLatestCommitsWithDocIDWithSchemaVersionIDField(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafyreichxrfyhajs7rzp3wh5f2zrmt3zkjqan5dmxoy4qno5ozy7omzfpq", + "cid": "bafyreihv7jqe32wsuff5vwzlp7izoo6pqg6kgqf5edknp3mqm3344gu35q", "schemaVersionId": "bafkreicprhqxzlw3akyssz2v6pifwfueavp7jq2yj3dghapi3qcq6achs4", }, }, diff --git a/tests/integration/query/one_to_many/with_cid_doc_id_test.go b/tests/integration/query/one_to_many/with_cid_doc_id_test.go index d335159697..3f415ab288 100644 --- a/tests/integration/query/one_to_many/with_cid_doc_id_test.go +++ b/tests/integration/query/one_to_many/with_cid_doc_id_test.go @@ -104,7 +104,7 @@ func TestQueryOneToManyWithCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreiauqb3yovbcofeysjckmex5xdzd6ilvspvypk7cqooguimi6kac5e" + cid: "bafyreicjhmyweoyzopsqf7qc4uqqpq7mwnqlpsfb2rzk3j2jg3a4d6fqy4" docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name @@ -179,7 +179,7 @@ func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreiauqb3yovbcofeysjckmex5xdzd6ilvspvypk7cqooguimi6kac5e", + cid: "bafyreicjhmyweoyzopsqf7qc4uqqpq7mwnqlpsfb2rzk3j2jg3a4d6fqy4", docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name @@ -252,7 
+252,7 @@ func TestQueryOneToManyWithParentUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreiauqb3yovbcofeysjckmex5xdzd6ilvspvypk7cqooguimi6kac5e", + cid: "bafyreicjhmyweoyzopsqf7qc4uqqpq7mwnqlpsfb2rzk3j2jg3a4d6fqy4", docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name @@ -325,7 +325,7 @@ func TestQueryOneToManyWithParentUpdateAndLastCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Book ( - cid: "bafyreifc646gthndgspdnkftmeaiowwangpfbtm7bpngosfsd72oul5a3u", + cid: "bafyreigyxgn2tss7objjzen5s77w6hijpe6wmmz4z3ercpxdcrq7uwnhl4", docID: "bae-5366ba09-54e8-5381-8169-a770aa9282ae" ) { name diff --git a/tests/integration/query/simple/with_cid_doc_id_test.go b/tests/integration/query/simple/with_cid_doc_id_test.go index dcf0d1a1d1..34dbbd5512 100644 --- a/tests/integration/query/simple/with_cid_doc_id_test.go +++ b/tests/integration/query/simple/with_cid_doc_id_test.go @@ -93,7 +93,7 @@ func TestQuerySimpleWithCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", + cid: "bafyreia5ph2hvwebdsxe7m2f6bwuq7ngwxzqp7esiuzjihtcz2jswma6xu", docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name @@ -135,7 +135,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", + cid: "bafyreia5ph2hvwebdsxe7m2f6bwuq7ngwxzqp7esiuzjihtcz2jswma6xu", docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name @@ -177,7 +177,7 @@ func TestQuerySimpleWithUpdateAndLastCidAndDocID(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreic3z3mjat7i7nm52jwprew7f7dimyob7uzgcuoypmdqekrhknnwba", + cid: "bafyreid2idg2eod3zv7zqxnv7pvrbcmlxiizho3s6xnk6c5zmxemi5gcxu", docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name @@ -224,7 +224,7 @@ func TestQuerySimpleWithUpdateAndMiddleCidAndDocID(t 
*testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreic3z3mjat7i7nm52jwprew7f7dimyob7uzgcuoypmdqekrhknnwba", + cid: "bafyreid2idg2eod3zv7zqxnv7pvrbcmlxiizho3s6xnk6c5zmxemi5gcxu", docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name @@ -266,7 +266,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocIDAndSchemaVersion(t *testing.T) testUtils.Request{ Request: `query { Users ( - cid: "bafyreicceacb554vtciciumodqmz6vmnfvr6uod2hfhnwujmfqx5pgq3fi", + cid: "bafyreia5ph2hvwebdsxe7m2f6bwuq7ngwxzqp7esiuzjihtcz2jswma6xu", docID: "bae-6845cfdf-cb0f-56a3-be3a-b5a67be5fbdc" ) { name @@ -324,7 +324,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreienkinjn7cvsonvhs4tslqvmmcnezuu4aif57jn75cyp6i3vdvkpm", + cid: "bafyreib4e2q5uvv6iabkja4s4yoep6b4ifs7rclkteqhwrwr7lkzn56po4", docID: "bae-d8cb53d4-ac5a-5c55-8306-64df633d400d" ) { name @@ -376,7 +376,7 @@ func TestCidAndDocIDQuery_ContainsPNCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreiceodj32fyhq3v7ryk6mmcjanwx3zr7ajl2k47w4setngmyx7nc3e", + cid: "bafyreia4qbtcckxhyaplunzj5waoli5btfpwyhsx5fpk7o62xawuqjxjti", docID: "bae-d420ebcd-023a-5800-ae2e-8ea89442318e" ) { name @@ -423,7 +423,7 @@ func TestCidAndDocIDQuery_ContainsPCounterWithIntKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreieypgt2mq43g4ute2hkzombdqw5v6wctleyxyy6vdkzitrfje636i", + cid: "bafyreibkt7zaipwafro2wajbsqbipma27g6hc2uedquhtjfwyulmwzz4ry", docID: "bae-d8cb53d4-ac5a-5c55-8306-64df633d400d" ) { name @@ -470,7 +470,7 @@ func TestCidAndDocIDQuery_ContainsPCounterWithFloatKind_NoError(t *testing.T) { testUtils.Request{ Request: `query { Users ( - cid: "bafyreigb3ujvnxie7kwl53w4chiq6cjcyuhranchseo5gmx5i6vfje67da", + cid: "bafyreihbfsbje6lqmot6xpv2hcseduxu5rv5hf3adsmk7hlkzfuaa6lj5e", docID: "bae-d420ebcd-023a-5800-ae2e-8ea89442318e" ) { name diff --git 
a/tests/integration/query/simple/with_version_test.go b/tests/integration/query/simple/with_version_test.go index 67edabea9d..732c1b6f1a 100644 --- a/tests/integration/query/simple/with_version_test.go +++ b/tests/integration/query/simple/with_version_test.go @@ -46,14 +46,14 @@ func TestQuerySimpleWithEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", + "cid": "bafyreiby7drdzfsg4wwo7f6vkdqhurbe74s4lhayn3k3226zvkgwjd2fbu", "links": []map[string]any{ { - "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "cid": "bafyreid4sasigytiflrh3rupyevo6wy43b6mlfi4jwkjjwvohgjcd3oscu", "name": "Age", }, { - "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "cid": "bafyreieg3p2kpyxwiowskvb3pp35nedzawmapjuib7glrvszcgmv6z37fm", "name": "Name", }, }, @@ -171,14 +171,14 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", + "cid": "bafyreiby7drdzfsg4wwo7f6vkdqhurbe74s4lhayn3k3226zvkgwjd2fbu", "L1": []map[string]any{ { - "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "cid": "bafyreid4sasigytiflrh3rupyevo6wy43b6mlfi4jwkjjwvohgjcd3oscu", "name": "Age", }, { - "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "cid": "bafyreieg3p2kpyxwiowskvb3pp35nedzawmapjuib7glrvszcgmv6z37fm", "name": "Name", }, }, @@ -242,7 +242,7 @@ func TestQuery_WithAllCommitFields_NoError(t *testing.T) { "_docID": docID, "_version": []map[string]any{ { - "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", + "cid": "bafyreiby7drdzfsg4wwo7f6vkdqhurbe74s4lhayn3k3226zvkgwjd2fbu", "collectionID": int64(1), "delta": nil, "docID": "bae-d4303725-7db9-53d2-b324-f3ee44020e52", @@ -251,11 +251,11 @@ func TestQuery_WithAllCommitFields_NoError(t *testing.T) { "height": 
int64(1), "links": []map[string]any{ { - "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "cid": "bafyreid4sasigytiflrh3rupyevo6wy43b6mlfi4jwkjjwvohgjcd3oscu", "name": "Age", }, { - "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "cid": "bafyreieg3p2kpyxwiowskvb3pp35nedzawmapjuib7glrvszcgmv6z37fm", "name": "Name", }, }, @@ -321,7 +321,7 @@ func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { "_docID": docID, "_version": []map[string]any{ { - "cid": "bafyreiewiyarxxkzmgss6g35i4h2uiyzoe6kbnmtwaxv4cab6xefnjlzka", + "cid": "bafyreigfstknvmsl77pg443lqqf2g64y7hr575tts5c4nxuzk3dynffkem", "collectionID": int64(1), "delta": nil, "docID": docID, @@ -330,18 +330,18 @@ func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { "height": int64(2), "links": []map[string]any{ { - "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", + "cid": "bafyreiby7drdzfsg4wwo7f6vkdqhurbe74s4lhayn3k3226zvkgwjd2fbu", "name": "_head", }, { - "cid": "bafyreiejccdc662jvyhia2ee57dvuuzsrsrhbx3hoenojtasofxtix3k7y", + "cid": "bafyreiapjg22e47sanhjtqgu453mvmxcfcl4ksrcoctyfl6nfsh3xwfcvm", "name": "Age", }, }, "schemaVersionId": "bafkreigqmcqzkbg3elpe24vfza4rjle2r6cxu7ihzvg56aov57crhaebry", }, { - "cid": "bafyreiamhlxewin3mgbr6dh3mrbwzvjfngfbwif2qdpjvaldzciivojaiu", + "cid": "bafyreiby7drdzfsg4wwo7f6vkdqhurbe74s4lhayn3k3226zvkgwjd2fbu", "collectionID": int64(1), "delta": nil, "docID": docID, @@ -350,11 +350,11 @@ func TestQuery_WithAllCommitFieldsWithUpdate_NoError(t *testing.T) { "height": int64(1), "links": []map[string]any{ { - "cid": "bafyreibmearhvd62tofeoxhhodzwkz446ehjybll22fyb4tgmnvx2kwfma", + "cid": "bafyreid4sasigytiflrh3rupyevo6wy43b6mlfi4jwkjjwvohgjcd3oscu", "name": "Age", }, { - "cid": "bafyreid45hs4k3kxxl5t6glfn5ohd6pebjbdhyvtrjmino6g3l2dqdzwiq", + "cid": "bafyreieg3p2kpyxwiowskvb3pp35nedzawmapjuib7glrvszcgmv6z37fm", "name": "Name", }, }, From f8953151d73c337c1f00b5964a1a4ffb343aa57c Mon Sep 17 
00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 24 Jun 2024 14:28:27 -0400 Subject: [PATCH 61/78] bot: Update dependencies (bulk dependabot PRs) 24-06-2024 (#2761) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by combining the following PRs: #2753 bot: Bump github.com/lestrrat-go/jwx/v2 from 2.0.21 to 2.1.0 #2752 bot: Bump github.com/cosmos/gogoproto from 1.4.12 to 1.5.0 #2751 bot: Bump graphiql from 3.2.3 to 3.3.1 in /playground #2749 bot: Bump typescript from 5.4.5 to 5.5.2 in /playground ⚠️ The following PR was resolved manually due to merge conflicts: #2750 bot: Bump graphql from 16.8.2 to 16.9.0 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- go.mod | 16 +- go.sum | 32 +- playground/package-lock.json | 545 ++++++++++++++++------------------- playground/package.json | 6 +- 4 files changed, 278 insertions(+), 321 deletions(-) diff --git a/go.mod b/go.mod index c684675448..7ffa87cf4c 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.3 require ( github.com/bits-and-blooms/bitset v1.13.0 github.com/bxcodec/faker v2.0.1+incompatible - github.com/cosmos/gogoproto v1.4.12 + github.com/cosmos/gogoproto v1.5.0 github.com/cyware/ssi-sdk v0.0.0-20231229164914-f93f3006379f github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/evanphx/json-patch/v5 v5.9.0 @@ -27,7 +27,7 @@ require ( github.com/ipld/go-ipld-prime/storage/bsrvadapter v0.0.0-20240322071758-198d7dba8fb8 github.com/jbenet/goprocess v0.1.4 github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c - github.com/lestrrat-go/jwx/v2 v2.0.21 + github.com/lestrrat-go/jwx/v2 v2.1.0 github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-libp2p-gostream v0.6.0 
github.com/libp2p/go-libp2p-kad-dht v0.25.2 @@ -55,7 +55,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.27.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 - golang.org/x/term v0.20.0 + golang.org/x/term v0.21.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.1 ) @@ -111,7 +111,7 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.15.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.0 // indirect @@ -250,13 +250,13 @@ require ( go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.23.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.25.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/tools v0.20.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gonum.org/v1/gonum v0.14.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240415141817-7cd4c1c1f9ec // indirect diff --git a/go.sum b/go.sum index fb05912276..b457ef68e9 100644 --- a/go.sum +++ b/go.sum @@ -102,8 +102,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs= github.com/cosmos/cosmos-db 
v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= -github.com/cosmos/gogoproto v1.4.12 h1:vB6Lbe/rtnYGjQuFxkPiPYiCybqFT8QvLipDZP8JpFE= -github.com/cosmos/gogoproto v1.4.12/go.mod h1:LnZob1bXRdUoqMMtwYlcR3wjiElmlC+FkjaZRv1/eLY= +github.com/cosmos/gogoproto v1.5.0 h1:SDVwzEqZDDBoslaeZg+dGE55hdzHfgUA40pEanMh52o= +github.com/cosmos/gogoproto v1.5.0/go.mod h1:iUM31aofn3ymidYG6bUR5ZFrk+Om8p5s754eMUcyp8I= github.com/cosmos/iavl v1.1.2 h1:zL9FK7C4L/P4IF1Dm5fIwz0WXCnn7Bp1M2FxH0ayM7Y= github.com/cosmos/iavl v1.1.2/go.mod h1:jLeUvm6bGT1YutCaL2fIar/8vGUE8cPZvh/gXEWDaDM= github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= @@ -214,8 +214,8 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4 github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -444,8 +444,8 @@ github.com/lestrrat-go/httprc v1.0.5 h1:bsTfiH8xaKOJPrg1R+E3iE/AWZr/x0Phj9PBTG/O github.com/lestrrat-go/httprc v1.0.5/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= 
-github.com/lestrrat-go/jwx/v2 v2.0.21 h1:jAPKupy4uHgrHFEdjVjNkUgoBKtVDgrQPB/h55FHrR0= -github.com/lestrrat-go/jwx/v2 v2.0.21/go.mod h1:09mLW8zto6bWL9GbwnqAli+ArLf+5M33QLQPDggkUWM= +github.com/lestrrat-go/jwx/v2 v2.1.0 h1:0zs7Ya6+39qoit7gwAf+cYm1zzgS3fceIdo7RmQ5lkw= +github.com/lestrrat-go/jwx/v2 v2.1.0/go.mod h1:Xpw9QIaUGiIUD1Wx0NcY1sIHwFf8lDuZn/cmxtXYRys= github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -842,8 +842,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 h1:985EYyeCOxTpcgOTJpflJUwOeEz0CQOdPt73OzpE9F8= golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= @@ -938,19 +938,19 @@ golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -973,8 +973,8 @@ golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/playground/package-lock.json b/playground/package-lock.json index d7626f6caa..0994b93557 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -8,8 +8,8 @@ "name": "playground", "version": "0.0.0", "dependencies": { - "graphiql": "^3.2.3", - "graphql": "^16.8.2", + "graphiql": "^3.3.1", + "graphql": "^16.9.0", "react": "^18.3.1", "react-dom": "^18.3.1", "swagger-ui-react": "^5.17.14" @@ -24,7 +24,7 @@ "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", - "typescript": "^5.4.5", + "typescript": "^5.5.2", "vite": "^5.3.1" } }, @@ -77,9 +77,9 @@ "peer": true }, "node_modules/@codemirror/view": { - "version": "6.26.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.26.3.tgz", - "integrity": "sha512-gmqxkPALZjkgSxIeeweY/wGQXBfwTUaLs8h7OKtSwfbj9Ct3L11lD+u1sS7XHppxFQoMDiMDp07P9f3I2jWOHw==", + "version": "6.28.2", 
+ "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.2.tgz", + "integrity": "sha512-A3DmyVfjgPsGIjiJqM/zvODUAPQdQl3ci0ghehYNnbt5x+o76xq+dL5+mMBuysDXnI3kapgOkoeJ0sbtL/3qPw==", "peer": true, "dependencies": { "@codemirror/state": "^6.4.0", @@ -583,9 +583,9 @@ "integrity": "sha512-J4yDIIthosAsRZ5CPYP/jQvUAQtlZTTD/4suA08/FEnlxqW3sKS9iAhgsa9VYLZ6vDHn/ixJgIqRQPotoBjxIw==" }, "node_modules/@graphiql/react": { - "version": "0.22.2", - "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.22.2.tgz", - "integrity": "sha512-46UV7CBQdZ0iU537uOkOU6HOOs7P1o7vQpFSUezB4VRem0Y3I4TDaYQADCOo7gFlwBs5Vb9YOup8r7cmXGIr7A==", + "version": "0.22.3", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.22.3.tgz", + "integrity": "sha512-rZGs0BbJ4ImFy6l489aXUEB3HzGVoD7hI8CycydNRXR6+qYgp/fuNFCXMJe+q9gDyC/XhBXni8Pdugk8HxJ05g==", "dependencies": { "@graphiql/toolkit": "^0.9.1", "@headlessui/react": "^1.7.15", @@ -824,26 +824,22 @@ } }, "node_modules/@radix-ui/primitive": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz", - "integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==", - "dependencies": { - "@babel/runtime": "^7.13.10" - } + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.0.tgz", + "integrity": "sha512-4Z8dn6Upk0qk4P74xBhZ6Hd/w0mPEzOOLxy4xiPXOXqjF7jZS0VAKk7/x/H6FyY2zCkYJqePf1G5KmkmNJ4RBA==" }, "node_modules/@radix-ui/react-arrow": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.0.3.tgz", - "integrity": "sha512-wSP+pHsB/jQRaL6voubsQ/ZlrGBHHrOjmBnr19hxYgtS0WvAFwZhK2WP/YY5yF9uKECCEEDGxuLxq1NBK51wFA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.0.tgz", + "integrity": "sha512-FmlW1rCg7hBpEBwFbjHwCW6AmWLQM6g/v0Sn8XbP9NvmSZ2San1FpQeyPtufzOMSIx7Y4dzjlHoifhp+7NkZhw==", 
"dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-primitive": "1.0.3" + "@radix-ui/react-primitive": "2.0.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -855,21 +851,20 @@ } }, "node_modules/@radix-ui/react-collection": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.0.3.tgz", - "integrity": "sha512-3SzW+0PW7yBBoQlT8wNcGtaxaD0XSu0uLUFgrtHY08Acx05TaHaOmVLR73c0j/cqpDy53KBMO7s0dx2wmOIDIA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.0.tgz", + "integrity": "sha512-GZsZslMJEyo1VKm5L1ZJY8tGDxZNPAoUeQUIbKeJfoi7Q4kmig5AsgLMYYuyYbfjd8fBmFORAIwYAkXMnXZgZw==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-slot": "1.0.2" + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-slot": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -881,15 +876,12 @@ } }, "node_modules/@radix-ui/react-compose-refs": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz", - "integrity": "sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==", - 
"dependencies": { - "@babel/runtime": "^7.13.10" - }, + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.0.tgz", + "integrity": "sha512-b4inOtiaOnYf9KWyO3jAeeCG6FeyfY6ldiEPanbUjWd+xIk5wZeHa8yVwmrJ2vderhu/BQvzCrJI0lHd+wIiqw==", "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -898,15 +890,12 @@ } }, "node_modules/@radix-ui/react-context": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz", - "integrity": "sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.0.tgz", + "integrity": "sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==", "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -915,31 +904,30 @@ } }, "node_modules/@radix-ui/react-dialog": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.5.tgz", - "integrity": "sha512-GjWJX/AUpB703eEBanuBnIWdIXg6NvJFCXcNlSZk4xdszCdhrJgBoUd1cGk67vFO+WdA2pfI/plOpqz/5GUP6Q==", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-dismissable-layer": "1.0.5", - "@radix-ui/react-focus-guards": "1.0.1", - "@radix-ui/react-focus-scope": "1.0.4", - "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-portal": "1.0.4", - "@radix-ui/react-presence": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - 
"@radix-ui/react-slot": "1.0.2", - "@radix-ui/react-use-controllable-state": "1.0.1", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.1.tgz", + "integrity": "sha512-zysS+iU4YP3STKNS6USvFVqI4qqx8EpiwmT5TuCApVEBca+eRCbONi4EgzfNSuVnOXvC5UPHHMjs8RXO6DH9Bg==", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-dismissable-layer": "1.1.0", + "@radix-ui/react-focus-guards": "1.1.0", + "@radix-ui/react-focus-scope": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-portal": "1.1.1", + "@radix-ui/react-presence": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-slot": "1.1.0", + "@radix-ui/react-use-controllable-state": "1.1.0", "aria-hidden": "^1.1.1", - "react-remove-scroll": "2.5.5" + "react-remove-scroll": "2.5.7" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -951,15 +939,12 @@ } }, "node_modules/@radix-ui/react-direction": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.0.1.tgz", - "integrity": "sha512-RXcvnXgyvYvBEOhCBuddKecVkoMiI10Jcm5cTI7abJRAHYfFxeu+FBQs/DvdxSYucxR5mna0dNsL6QFlds5TMA==", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.0.tgz", + "integrity": "sha512-BUuBvgThEiAXh2DWu93XsT+a3aWrGqolGlqqw5VU1kG7p/ZH2cuDlM1sRLNnY3QcBS69UIz2mcKhMxDsdewhjg==", "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { 
"@types/react": { @@ -968,22 +953,21 @@ } }, "node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.5.tgz", - "integrity": "sha512-aJeDjQhywg9LBu2t/At58hCvr7pEm0o2Ke1x33B+MhjNmmZ17sy4KImo0KPLgsnc/zN7GPdce8Cnn0SWvwZO7g==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.0.tgz", + "integrity": "sha512-/UovfmmXGptwGcBQawLzvn2jOfM0t4z3/uKffoBlj724+n3FvBbZ7M0aaBOmkp6pqFYpO4yx8tSVJjx3Fl2jig==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-use-callback-ref": "1.0.1", - "@radix-ui/react-use-escape-keydown": "1.0.3" + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-escape-keydown": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -995,24 +979,23 @@ } }, "node_modules/@radix-ui/react-dropdown-menu": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.0.6.tgz", - "integrity": "sha512-i6TuFOoWmLWq+M/eCLGd/bQ2HfAX1RJgvrBQ6AQLmzfvsLdefxbWu8G9zczcPFfcSPehz9GcpF6K9QYreFV8hA==", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-menu": "2.0.6", - "@radix-ui/react-primitive": "1.0.3", - 
"@radix-ui/react-use-controllable-state": "1.0.1" + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.1.tgz", + "integrity": "sha512-y8E+x9fBq9qvteD2Zwa4397pUVhYsh9iq44b5RD5qu1GMJWBCBuVg1hMyItbc6+zH00TxGRqd9Iot4wzf3OoBQ==", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-menu": "2.1.1", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-controllable-state": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1024,15 +1007,12 @@ } }, "node_modules/@radix-ui/react-focus-guards": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.0.1.tgz", - "integrity": "sha512-Rect2dWbQ8waGzhMavsIbmSVCgYxkXLxxR3ZvCX79JOglzdEy4JXMb98lq4hPxUbLr77nP0UOGf4rcMU+s1pUA==", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.0.tgz", + "integrity": "sha512-w6XZNUPVv6xCpZUqb/yN9DL6auvpGX3C/ee6Hdi16v2UUy25HV2Q5bcflsiDyT/g5RwbPQ/GIT1vLkeRb+ITBw==", "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1041,20 +1021,19 @@ } }, "node_modules/@radix-ui/react-focus-scope": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.4.tgz", - "integrity": 
"sha512-sL04Mgvf+FmyvZeYfNu1EPAaaxD+aw7cYeIB9L9Fvq8+urhltTRaEo5ysKOpHuKPclsZcSUMKlN05x4u+CINpA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.0.tgz", + "integrity": "sha512-200UD8zylvEyL8Bx+z76RJnASR2gRMuxlgFCPAe/Q/679a/r0eK3MBVYMb7vZODZcffZBdob1EGnky78xmVvcA==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-use-callback-ref": "1.0.1" + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1066,16 +1045,15 @@ } }, "node_modules/@radix-ui/react-id": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz", - "integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.0.tgz", + "integrity": "sha512-EJUrI8yYh7WOjNOqpoJaf1jlFIH2LvtgAl+YcFqNCa+4hj64ZXmPkAKOFs/ukjz3byN6bdb/AVUqHkI8/uWWMA==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-use-layout-effect": "1.0.1" + "@radix-ui/react-use-layout-effect": "1.1.0" }, "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1084,35 +1062,34 @@ } }, "node_modules/@radix-ui/react-menu": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.0.6.tgz", - "integrity": 
"sha512-BVkFLS+bUC8HcImkRKPSiVumA1VPOOEC5WBMiT+QAVsPzW1FJzI9KnqgGxVDPBcql5xXrHkD3JOVoXWEXD8SYA==", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-collection": "1.0.3", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-direction": "1.0.1", - "@radix-ui/react-dismissable-layer": "1.0.5", - "@radix-ui/react-focus-guards": "1.0.1", - "@radix-ui/react-focus-scope": "1.0.4", - "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-popper": "1.1.3", - "@radix-ui/react-portal": "1.0.4", - "@radix-ui/react-presence": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-roving-focus": "1.0.4", - "@radix-ui/react-slot": "1.0.2", - "@radix-ui/react-use-callback-ref": "1.0.1", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.1.tgz", + "integrity": "sha512-oa3mXRRVjHi6DZu/ghuzdylyjaMXLymx83irM7hTxutQbD+7IhPKdMdRHD26Rm+kHRrWcrUkkRPv5pd47a2xFQ==", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-collection": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-dismissable-layer": "1.1.0", + "@radix-ui/react-focus-guards": "1.1.0", + "@radix-ui/react-focus-scope": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-popper": "1.2.0", + "@radix-ui/react-portal": "1.1.1", + "@radix-ui/react-presence": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-roving-focus": "1.1.0", + "@radix-ui/react-slot": "1.1.0", + "@radix-ui/react-use-callback-ref": "1.1.0", "aria-hidden": "^1.1.1", - "react-remove-scroll": "2.5.5" + "react-remove-scroll": "2.5.7" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 
|| ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1124,27 +1101,26 @@ } }, "node_modules/@radix-ui/react-popper": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.3.tgz", - "integrity": "sha512-cKpopj/5RHZWjrbF2846jBNacjQVwkP068DfmgrNJXpvVWrOvlAmE9xSiy5OqeE+Gi8D9fP+oDhUnPqNMY8/5w==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.0.tgz", + "integrity": "sha512-ZnRMshKF43aBxVWPWvbj21+7TQCvhuULWJ4gNIKYpRlQt5xGRhLx66tMp8pya2UkGHTSlhpXwmjqltDYHhw7Vg==", "dependencies": { - "@babel/runtime": "^7.13.10", "@floating-ui/react-dom": "^2.0.0", - "@radix-ui/react-arrow": "1.0.3", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-use-callback-ref": "1.0.1", - "@radix-ui/react-use-layout-effect": "1.0.1", - "@radix-ui/react-use-rect": "1.0.1", - "@radix-ui/react-use-size": "1.0.1", - "@radix-ui/rect": "1.0.1" + "@radix-ui/react-arrow": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0", + "@radix-ui/react-use-rect": "1.1.0", + "@radix-ui/react-use-size": "1.1.0", + "@radix-ui/rect": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1156,18 +1132,18 @@ } }, "node_modules/@radix-ui/react-portal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.4.tgz", - "integrity": 
"sha512-Qki+C/EuGUVCQTOTD5vzJzJuMUlewbzuKyUy+/iHM2uwGiru9gZeBJtHAPKAEkB5KWGi9mP/CHKcY0wt1aW45Q==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.1.tgz", + "integrity": "sha512-A3UtLk85UtqhzFqtoC8Q0KvR2GbXF3mtPgACSazajqq6A41mEQgo53iPzY4i6BwDxlIFqWIhiQ2G729n+2aw/g==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-primitive": "1.0.3" + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-layout-effect": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1179,19 +1155,18 @@ } }, "node_modules/@radix-ui/react-presence": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz", - "integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.0.tgz", + "integrity": "sha512-Gq6wuRN/asf9H/E/VzdKoUtT8GC9PQc9z40/vEr0VCJ4u5XvvhWIrSsCB6vD2/cH7ugTdSfYq9fLJCcM00acrQ==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-use-layout-effect": "1.0.1" + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1203,18 +1178,17 @@ } }, "node_modules/@radix-ui/react-primitive": { - "version": 
"1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz", - "integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.0.tgz", + "integrity": "sha512-ZSpFm0/uHa8zTvKBDjLFWLo8dkr4MBsiDLz0g3gMUwqgLHz9rTaRRGYDgvZPtBJgYCBKXkS9fzmoySgr8CO6Cw==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-slot": "1.0.2" + "@radix-ui/react-slot": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1226,26 +1200,25 @@ } }, "node_modules/@radix-ui/react-roving-focus": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.0.4.tgz", - "integrity": "sha512-2mUg5Mgcu001VkGy+FfzZyzbmuUWzgWkj3rvv4yu+mLw03+mTzbxZHvfcGyFp2b8EkQeMkpRQ5FiA2Vr2O6TeQ==", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-collection": "1.0.3", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-direction": "1.0.1", - "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-use-callback-ref": "1.0.1", - "@radix-ui/react-use-controllable-state": "1.0.1" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.0.tgz", + "integrity": "sha512-EA6AMGeq9AEeQDeSH0aZgG198qkfHSbvWTf1HvoDmOB5bBG/qTxjYMWUKMnYiV6J/iP/J8MEFSuB2zRU2n7ODA==", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-collection": "1.1.0", + "@radix-ui/react-compose-refs": 
"1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-controllable-state": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1257,16 +1230,15 @@ } }, "node_modules/@radix-ui/react-slot": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz", - "integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-compose-refs": "1.0.1" + "@radix-ui/react-compose-refs": "1.1.0" }, "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1275,29 +1247,28 @@ } }, "node_modules/@radix-ui/react-tooltip": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.7.tgz", - "integrity": "sha512-lPh5iKNFVQ/jav/j6ZrWq3blfDJ0OH9R6FlNUHPMqdLuQ9vwDgFsRxvl8b7Asuy5c8xmoojHUxKHQSOAvMHxyw==", - "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/primitive": "1.0.1", - "@radix-ui/react-compose-refs": "1.0.1", - "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-dismissable-layer": "1.0.5", - "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-popper": 
"1.1.3", - "@radix-ui/react-portal": "1.0.4", - "@radix-ui/react-presence": "1.0.1", - "@radix-ui/react-primitive": "1.0.3", - "@radix-ui/react-slot": "1.0.2", - "@radix-ui/react-use-controllable-state": "1.0.1", - "@radix-ui/react-visually-hidden": "1.0.3" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.1.tgz", + "integrity": "sha512-LLE8nzNE4MzPMw3O2zlVlkLFid3y9hMUs7uCbSHyKSo+tCN4yMCf+ZCCcfrYgsOC0TiHBPQ1mtpJ2liY3ZT3SQ==", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-dismissable-layer": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-popper": "1.2.0", + "@radix-ui/react-portal": "1.1.1", + "@radix-ui/react-presence": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-slot": "1.1.0", + "@radix-ui/react-use-controllable-state": "1.1.0", + "@radix-ui/react-visually-hidden": "1.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1309,15 +1280,12 @@ } }, "node_modules/@radix-ui/react-use-callback-ref": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz", - "integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz", + "integrity": "sha512-CasTfvsy+frcFkbXtSJ2Zu9JHpN8TYKxkgJGWbjiZhFivxaeW7rMeZt7QELGVLaYVfFMsKHjb7Ak0nMEe+2Vfw==", "peerDependencies": { "@types/react": "*", - "react": 
"^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1326,16 +1294,15 @@ } }, "node_modules/@radix-ui/react-use-controllable-state": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz", - "integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.1.0.tgz", + "integrity": "sha512-MtfMVJiSr2NjzS0Aa90NPTnvTSg6C/JLCV7ma0W6+OMV78vd8OyRpID+Ng9LxzsPbLeuBnWBA1Nq30AtBIDChw==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-use-callback-ref": "1.0.1" + "@radix-ui/react-use-callback-ref": "1.1.0" }, "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1344,16 +1311,15 @@ } }, "node_modules/@radix-ui/react-use-escape-keydown": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz", - "integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz", + "integrity": "sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-use-callback-ref": "1.0.1" + "@radix-ui/react-use-callback-ref": "1.1.0" }, "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ 
-1362,15 +1328,12 @@ } }, "node_modules/@radix-ui/react-use-layout-effect": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz", - "integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==", - "dependencies": { - "@babel/runtime": "^7.13.10" - }, + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.0.tgz", + "integrity": "sha512-+FPE0rOdziWSrH9athwI1R0HDVbWlEhd+FR+aSDk4uWGmSJ9Z54sdZVDQPZAinJhJXwfT+qnj969mCsT2gfm5w==", "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1379,16 +1342,15 @@ } }, "node_modules/@radix-ui/react-use-rect": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.0.1.tgz", - "integrity": "sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.0.tgz", + "integrity": "sha512-0Fmkebhr6PiseyZlYAOtLS+nb7jLmpqTrJyv61Pe68MKYW6OWdRE2kI70TaYY27u7H0lajqM3hSMMLFq18Z7nQ==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/rect": "1.0.1" + "@radix-ui/rect": "1.1.0" }, "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1397,16 +1359,15 @@ } }, "node_modules/@radix-ui/react-use-size": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.0.1.tgz", - "integrity": "sha512-ibay+VqrgcaI6veAojjofPATwledXiSmX+C0KrBk/xgpX9rBzPV3OsfwlhQdUOFbh+LKQorLYT+xTXW9V8yd0g==", + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.0.tgz", + "integrity": "sha512-XW3/vWuIXHa+2Uwcc2ABSfcCledmXhhQPlGbfcRXbiUQI5Icjcg19BGCZVKKInYbvUCut/ufbbLLPFC5cbb1hw==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-use-layout-effect": "1.0.1" + "@radix-ui/react-use-layout-effect": "1.1.0" }, "peerDependencies": { "@types/react": "*", - "react": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1415,18 +1376,17 @@ } }, "node_modules/@radix-ui/react-visually-hidden": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.0.3.tgz", - "integrity": "sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.1.0.tgz", + "integrity": "sha512-N8MDZqtgCgG5S3aV60INAB475osJousYpZ4cTJ2cFbMpdHS5Y6loLTH8LPtkj2QN0x93J30HT/M3qJXM0+lyeQ==", "dependencies": { - "@babel/runtime": "^7.13.10", - "@radix-ui/react-primitive": "1.0.3" + "@radix-ui/react-primitive": "2.0.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", - "react": "^16.8 || ^17.0 || ^18.0", - "react-dom": "^16.8 || ^17.0 || ^18.0" + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -1438,12 +1398,9 @@ } }, "node_modules/@radix-ui/rect": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.0.1.tgz", - "integrity": "sha512-fyrgCaedtvMg9NK3en0pnOYJdtfwxUcNolezkNPUsoX57X8oQk+NkqcvzHXD2uKNij6GXmWU9NDru2IWjrO4BQ==", - "dependencies": { - "@babel/runtime": "^7.13.10" - } + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.0.tgz", + "integrity": 
"sha512-A9+lCBZoaMJlVKcRBz2YByCG+Cp2t6nAnMnNba+XiWxnj6r4JUFqfsgwocMBZU9LPtdxC6wB56ySYpc7LQIoJg==" }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.17.2", @@ -2323,11 +2280,11 @@ } }, "node_modules/@tanstack/react-virtual": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.5.0.tgz", - "integrity": "sha512-rtvo7KwuIvqK9zb0VZ5IL7fiJAEnG+0EiFZz8FUOs+2mhGqdGmjKIaT1XU7Zq0eFqL0jonLlhbayJI/J2SA/Bw==", + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.7.0.tgz", + "integrity": "sha512-3RtOwEU1HKS4iFBoTcCrV3Szqt4KoERMhZr8v57dvnh5o70sR9GAdF+0aE/qhiOmePrKujGwAayFNJSr/8Dbqw==", "dependencies": { - "@tanstack/virtual-core": "3.5.0" + "@tanstack/virtual-core": "3.7.0" }, "funding": { "type": "github", @@ -2339,9 +2296,9 @@ } }, "node_modules/@tanstack/virtual-core": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.5.0.tgz", - "integrity": "sha512-KnPRCkQTyqhanNC0K63GBG3wA8I+D1fQuVnAvcBF8f13akOKeQp1gSbu6f77zCxhEk727iV5oQnbHLYzHrECLg==", + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.7.0.tgz", + "integrity": "sha512-p0CWuqn+n8iZmsL7/l0Xg7kbyIKnHNqkEJkMDOkg4x3Ni3LohszmnJY8FPhTgG7Ad9ZFGcdKmn1R1mKUGEh9Xg==", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" @@ -3825,11 +3782,11 @@ "dev": true }, "node_modules/graphiql": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.2.3.tgz", - "integrity": "sha512-b5XuFyTWkORhQkUZULPOPmUXocg+x7HFB53cYEjV7LcH4taB4ViGwmXqHILhfPtv+JcTN80Aw8HELVWSa16iiA==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.3.1.tgz", + "integrity": "sha512-UA29FQ418Pcxat54CvM//S5G+7DKG7XQ7s9UyAEdb7zMAKPANIDd222XEYNxG2I/FgAxsiq3ZTBpxwvPbB9Mcw==", "dependencies": { - "@graphiql/react": "^0.22.2", + "@graphiql/react": 
"^0.22.3", "@graphiql/toolkit": "^0.9.1", "graphql-language-service": "^5.2.1", "markdown-it": "^14.1.0" @@ -3841,9 +3798,9 @@ } }, "node_modules/graphql": { - "version": "16.8.2", - "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.2.tgz", - "integrity": "sha512-cvVIBILwuoSyD54U4cF/UXDh5yAobhNV/tPygI4lZhgOIJQE/WLWC4waBRb4I6bDVYb3OVx3lfHbaQOEoUD5sg==", + "version": "16.9.0", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.9.0.tgz", + "integrity": "sha512-GGTKBX4SD7Wdb8mqeDLni2oaRGYQWjWHGKPQ24ZMnUtKfcsVoiv4uX8+LJr1K6U5VW2Lu1BwJnj7uiori0YtRw==", "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } @@ -5042,11 +4999,11 @@ } }, "node_modules/react-remove-scroll": { - "version": "2.5.5", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", - "integrity": "sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==", + "version": "2.5.7", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.7.tgz", + "integrity": "sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA==", "dependencies": { - "react-remove-scroll-bar": "^2.3.3", + "react-remove-scroll-bar": "^2.3.4", "react-style-singleton": "^2.2.1", "tslib": "^2.1.0", "use-callback-ref": "^1.3.0", @@ -5843,9 +5800,9 @@ } }, "node_modules/typescript": { - "version": "5.4.5", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", - "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.2.tgz", + "integrity": "sha512-NcRtPEOsPFFWjobJEtfihkLCZCXZt/os3zf8nTxjVH3RvTSxjrCamJpbExGvYOF+tFHc3pA65qpdwPbzjohhew==", "dev": true, "bin": { "tsc": "bin/tsc", diff --git a/playground/package.json b/playground/package.json index 9b9638cde3..fd04253371 100644 --- 
a/playground/package.json +++ b/playground/package.json @@ -10,8 +10,8 @@ "preview": "vite preview" }, "dependencies": { - "graphiql": "^3.2.3", - "graphql": "^16.8.2", + "graphiql": "^3.3.1", + "graphql": "^16.9.0", "react": "^18.3.1", "react-dom": "^18.3.1", "swagger-ui-react": "^5.17.14" @@ -26,7 +26,7 @@ "eslint": "^8.57.0", "eslint-plugin-react-hooks": "^4.6.2", "eslint-plugin-react-refresh": "^0.4.7", - "typescript": "^5.4.5", + "typescript": "^5.5.2", "vite": "^5.3.1" } } From 281aa77c5ff38352a674b82df453848e1ebb312b Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Mon, 24 Jun 2024 15:46:30 -0400 Subject: [PATCH 62/78] fix: Return slice of correct length from db.AddSchema (#2765) ## Relevant issue(s) Resolves #2764 ## Description Return slice of correct length from db.AddSchema, values are appended to this slice later in the function, but the result declaration is done as if they will be set by index. Bug also affected `db.AddView`. --- internal/db/collection_define.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/db/collection_define.go b/internal/db/collection_define.go index a8b9fe9abd..6eb8d9dddb 100644 --- a/internal/db/collection_define.go +++ b/internal/db/collection_define.go @@ -29,7 +29,7 @@ func (db *db) createCollections( ctx context.Context, newDefinitions []client.CollectionDefinition, ) ([]client.CollectionDefinition, error) { - returnDescriptions := make([]client.CollectionDefinition, len(newDefinitions)) + returnDescriptions := make([]client.CollectionDefinition, 0, len(newDefinitions)) existingDefinitions, err := db.getAllActiveDefinitions(ctx) if err != nil { From f627448ae62b9ffef30bb7c4512e3936bce40f4f Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 25 Jun 2024 11:53:15 -0400 Subject: [PATCH 63/78] feat: Ability to generate a new identity (#2760) ## Relevant issue(s) Resolves #2554 ## Description Now that the `core_acp` work is merged the identity generation becomes much more simple, with a `did` 
instead of `source address`. This util command can be handy to quickly generate new identity pairs with their did. ### Demo Issue the command: ``` $ defradb identity new ``` Output: ```json { "PrivateKey": "8wsOdJZCg06ISJxBqUyrD1ovDF5WgJJrSup2i4avnuU=", "PublicKey": "BG6dw3UW3rv+IiTXfL0LXvACw7Qz3DSBy1KmS/eSNrsrRkToBsize7zp+xwFrelPylOPgZguG7ZmirAtTaY89ow=", "DID": "did:key:z7r8oqgz72xy7Z7V5WGF21iWc7EY8vCpjZA1nGugoKjGeppeMkqeD7FV5AxEQAnciBz5vk5c2zrTZytofJNM8ZxyBzSpT" } ``` ## How has this been tested? - cli test - unit tests Specify the platform(s) on which this was tested: - WSL2 --- acp/identity/errors.go | 10 +++- acp/identity/generate.go | 50 +++++++++++++++++++ acp/identity/identity_test.go | 36 ++++++++++++- cli/cli.go | 6 +++ cli/identity.go | 25 ++++++++++ cli/identity_new.go | 40 +++++++++++++++ cli/identity_new_test.go | 26 ++++++++++ docs/website/references/cli/defradb.md | 1 + .../references/cli/defradb_identity.md | 37 ++++++++++++++ .../references/cli/defradb_identity_new.md | 45 +++++++++++++++++ 10 files changed, 273 insertions(+), 3 deletions(-) create mode 100644 acp/identity/generate.go create mode 100644 cli/identity.go create mode 100644 cli/identity_new.go create mode 100644 cli/identity_new_test.go create mode 100644 docs/website/references/cli/defradb_identity.md create mode 100644 docs/website/references/cli/defradb_identity_new.md diff --git a/acp/identity/errors.go b/acp/identity/errors.go index 54f272b780..3ad815b8bb 100644 --- a/acp/identity/errors.go +++ b/acp/identity/errors.go @@ -16,9 +16,15 @@ import ( "github.com/sourcenetwork/defradb/errors" ) -const errDIDCreation = "could not produce did for key" +const ( + errDIDCreation = "could not produce did for key" + errFailedToGenerateIdentityFromPrivateKey = "failed to generate identity from private key" +) -var ErrDIDCreation = errors.New(errDIDCreation) +var ( + ErrDIDCreation = errors.New(errDIDCreation) + ErrFailedToGenerateIdentityFromPrivateKey = 
errors.New(errFailedToGenerateIdentityFromPrivateKey) +) func newErrDIDCreation(inner error, keytype string, pubKey []byte) error { return errors.Wrap( diff --git a/acp/identity/generate.go b/acp/identity/generate.go new file mode 100644 index 0000000000..11c9188ff0 --- /dev/null +++ b/acp/identity/generate.go @@ -0,0 +1,50 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package identity + +import "github.com/sourcenetwork/defradb/crypto" + +// RawIdentity holds the raw bytes that make up an actor's identity. +type RawIdentity struct { + // An actor's private key. + PrivateKey []byte + + // An actor's corresponding public key address. + PublicKey []byte + + // An actor's DID. Generated from the public key address. + DID string +} + +// Generate generates a new identity. 
+func Generate() (RawIdentity, error) { + privateKey, err := crypto.GenerateSecp256k1() + if err != nil { + return RawIdentity{}, err + } + + maybeNewIdentity, err := FromPrivateKey(privateKey) + if err != nil { + return RawIdentity{}, err + } + + if !maybeNewIdentity.HasValue() { + return RawIdentity{}, ErrFailedToGenerateIdentityFromPrivateKey + } + + newIdentity := maybeNewIdentity.Value() + + return RawIdentity{ + PrivateKey: newIdentity.PrivateKey.Serialize(), + PublicKey: newIdentity.PublicKey.SerializeUncompressed(), + DID: newIdentity.DID, + }, nil +} diff --git a/acp/identity/identity_test.go b/acp/identity/identity_test.go index 2f04c01337..bcef99005b 100644 --- a/acp/identity/identity_test.go +++ b/acp/identity/identity_test.go @@ -30,7 +30,7 @@ func Test_DIDFromPublicKey_ProducesDIDForPublicKey(t *testing.T) { require.NoError(t, err) } -func Test_didFromPublicKey_ReturnsErrorWhenProducerFails(t *testing.T) { +func Test_DIDFromPublicKey_ReturnsErrorWhenProducerFails(t *testing.T) { mockedProducer := func(crypto.KeyType, []byte) (*key.DIDKey, error) { return nil, fmt.Errorf("did generation err") } @@ -42,3 +42,37 @@ func Test_didFromPublicKey_ReturnsErrorWhenProducerFails(t *testing.T) { require.Empty(t, did) require.ErrorIs(t, err, ErrDIDCreation) } + +func Test_RawIdentityGeneration_ReturnsNewRawIdentity(t *testing.T) { + newIdentity, err := Generate() + require.NoError(t, err) + + // Check that both private and public key are not empty. + require.NotEmpty(t, newIdentity.PrivateKey) + require.NotEmpty(t, newIdentity.PublicKey) + + // Check leading `did:key` prefix. + require.Equal(t, newIdentity.DID[:7], "did:key") +} + +func Test_RawIdentityGenerationIsNotFixed_ReturnsUniqueRawIdentites(t *testing.T) { + newIdentity1, err1 := Generate() + newIdentity2, err2 := Generate() + require.NoError(t, err1) + require.NoError(t, err2) + + // Check that both private and public key are not empty. 
+ require.NotEmpty(t, newIdentity1.PrivateKey) + require.NotEmpty(t, newIdentity1.PublicKey) + require.NotEmpty(t, newIdentity2.PrivateKey) + require.NotEmpty(t, newIdentity2.PublicKey) + + // Check leading `did:key` prefix. + require.Equal(t, newIdentity1.DID[:7], "did:key") + require.Equal(t, newIdentity2.DID[:7], "did:key") + + // Check both are different. + require.NotEqual(t, newIdentity1.PrivateKey, newIdentity2.PrivateKey) + require.NotEqual(t, newIdentity1.PublicKey, newIdentity2.PublicKey) + require.NotEqual(t, newIdentity1.DID, newIdentity2.DID) +} diff --git a/cli/cli.go b/cli/cli.go index 7099f57f99..e87ea8dec9 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -129,10 +129,16 @@ func NewDefraCommand() *cobra.Command { MakeKeyringExportCommand(), ) + identity := MakeIdentityCommand() + identity.AddCommand( + MakeIdentityNewCommand(), + ) + root := MakeRootCommand() root.AddCommand( client, keyring, + identity, MakeStartCommand(), MakeServerDumpCmd(), MakeVersionCommand(), diff --git a/cli/identity.go b/cli/identity.go new file mode 100644 index 0000000000..66efcec098 --- /dev/null +++ b/cli/identity.go @@ -0,0 +1,25 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeIdentityCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "identity", + Short: "Interact with identity features of DefraDB instance", + Long: `Interact with identity features of DefraDB instance`, + } + + return cmd +} diff --git a/cli/identity_new.go b/cli/identity_new.go new file mode 100644 index 0000000000..cd903979fc --- /dev/null +++ b/cli/identity_new.go @@ -0,0 +1,40 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/acp/identity" +) + +func MakeIdentityNewCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "new", + Short: "Generate a new identity", + Long: `Generate a new identity + +Example: generate a new identity: + defradb identity new + +`, + RunE: func(cmd *cobra.Command, args []string) error { + newIdentity, err := identity.Generate() + if err != nil { + return err + } + + return writeJSON(cmd, newIdentity) + }, + } + + return cmd +} diff --git a/cli/identity_new_test.go b/cli/identity_new_test.go new file mode 100644 index 0000000000..cb4367abe3 --- /dev/null +++ b/cli/identity_new_test.go @@ -0,0 +1,26 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewIdentityGeneration(t *testing.T) { + cmd := NewDefraCommand() + + cmd.SetArgs([]string{"identity", "new"}) + + err := cmd.Execute() + require.NoError(t, err) +} diff --git a/docs/website/references/cli/defradb.md b/docs/website/references/cli/defradb.md index 3edc08b8d9..f2a6c85840 100644 --- a/docs/website/references/cli/defradb.md +++ b/docs/website/references/cli/defradb.md @@ -31,6 +31,7 @@ Start a DefraDB node, interact with a local or remote node, and much more. ### SEE ALSO * [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb identity](defradb_identity.md) - Interact with identity features of DefraDB instance * [defradb keyring](defradb_keyring.md) - Manage DefraDB private keys * [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database * [defradb start](defradb_start.md) - Start a DefraDB node diff --git a/docs/website/references/cli/defradb_identity.md b/docs/website/references/cli/defradb_identity.md new file mode 100644 index 0000000000..1f658047b2 --- /dev/null +++ b/docs/website/references/cli/defradb_identity.md @@ -0,0 +1,37 @@ +## defradb identity + +Interact with identity features of DefraDB instance + +### Synopsis + +Interact with identity features of DefraDB instance + +### Options + +``` + -h, --help help for identity +``` + +### Options inherited from parent commands + +``` + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. 
Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... + --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") +``` + +### SEE ALSO + +* [defradb](defradb.md) - DefraDB Edge Database +* [defradb identity new](defradb_identity_new.md) - Generate a new identity + diff --git a/docs/website/references/cli/defradb_identity_new.md b/docs/website/references/cli/defradb_identity_new.md new file mode 100644 index 0000000000..dd8cf0af7b --- /dev/null +++ b/docs/website/references/cli/defradb_identity_new.md @@ -0,0 +1,45 @@ +## defradb identity new + +Generate a new identity + +### Synopsis + +Generate a new identity + +Example: generate a new identity: + defradb identity new + + + +``` +defradb identity new [flags] +``` + +### Options + +``` + -h, --help help for new +``` + +### Options inherited from parent commands + +``` + --keyring-backend string Keyring backend to use. Options are file or system (default "file") + --keyring-namespace string Service name to use when using the system backend (default "defradb") + --keyring-path string Path to store encrypted keys when using the file backend (default "keys") + --log-format string Log format to use. Options are text or json (default "text") + --log-level string Log level to use. Options are debug, info, error, fatal (default "info") + --log-output string Log output path. Options are stderr or stdout. (default "stderr") + --log-overrides string Logger config overrides. Format ,=,...;,... 
+ --log-source Include source location in logs + --log-stacktrace Include stacktrace in error and fatal logs + --no-keyring Disable the keyring and generate ephemeral keys + --no-log-color Disable colored log output + --rootdir string Directory for persistent data (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181") +``` + +### SEE ALSO + +* [defradb identity](defradb_identity.md) - Interact with identity features of DefraDB instance + From a9f8dee14d4286fec14b282b0dfee2caf60b304b Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 25 Jun 2024 13:20:18 -0400 Subject: [PATCH 64/78] tools(i): Make one command do all the steps (#2772) ## Relevant issue(s) Resolves #2771 ## Description Runs all the developer make commands in one command. ## How has this been tested? locally Specify the platform(s) on which this was tested: - WSL2 --- Makefile | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Makefile b/Makefile index f4cfa7a7bd..134291390f 100644 --- a/Makefile +++ b/Makefile @@ -380,3 +380,11 @@ docs\:godoc: .PHONY: toc toc: bash tools/scripts/md-toc/gh-md-toc --insert --no-backup --hide-footer --skip-header README.md + +.PHONY: fix +fix: + @$(MAKE) deps + @$(MAKE) lint\:fix + @$(MAKE) tidy + @$(MAKE) mocks + @$(MAKE) docs From 0a0657f63030345861e6560429fcbd67e99ff619 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 25 Jun 2024 14:56:20 -0400 Subject: [PATCH 65/78] fix: Change new identity keys to hex format (#2773) ## Relevant issue(s) Resolves #2770 ## Description - Return the priv/pub keys in hex Specify the platform(s) on which this was tested: - WSL2 --- acp/identity/generate.go | 21 ++++++++++++------- cli/identity_new.go | 6 ++++++ .../references/cli/defradb_identity_new.md | 6 ++++++ 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/acp/identity/generate.go b/acp/identity/generate.go index 11c9188ff0..cf37ce6e46 100644 --- a/acp/identity/generate.go +++ 
b/acp/identity/generate.go @@ -10,17 +10,22 @@ package identity -import "github.com/sourcenetwork/defradb/crypto" +import ( + "encoding/hex" + + "github.com/sourcenetwork/defradb/crypto" +) // RawIdentity holds the raw bytes that make up an actor's identity. type RawIdentity struct { - // An actor's private key. - PrivateKey []byte + // PrivateKey is a secp256k1 private key that is a 256-bit big-endian + // binary-encoded number, padded to a length of 32 bytes in HEX format. + PrivateKey string - // An actor's corresponding public key address. - PublicKey []byte + // PublicKey is a compressed 33-byte secp256k1 public key in HEX format. + PublicKey string - // An actor's DID. Generated from the public key address. + // DID is `did:key` key generated from the public key address. DID string } @@ -43,8 +48,8 @@ func Generate() (RawIdentity, error) { newIdentity := maybeNewIdentity.Value() return RawIdentity{ - PrivateKey: newIdentity.PrivateKey.Serialize(), - PublicKey: newIdentity.PublicKey.SerializeUncompressed(), + PrivateKey: hex.EncodeToString(newIdentity.PrivateKey.Serialize()), + PublicKey: hex.EncodeToString(newIdentity.PublicKey.SerializeCompressed()), DID: newIdentity.DID, }, nil } diff --git a/cli/identity_new.go b/cli/identity_new.go index cd903979fc..e7101c1bae 100644 --- a/cli/identity_new.go +++ b/cli/identity_new.go @@ -22,6 +22,12 @@ func MakeIdentityNewCommand() *cobra.Command { Short: "Generate a new identity", Long: `Generate a new identity +The generated identity contains: +- A secp256k1 private key that is a 256-bit big-endian binary-encoded number, +padded to a length of 32 bytes in HEX format. +- A compressed 33-byte secp256k1 public key in HEX format. +- A "did:key" generated from the public key. 
+ Example: generate a new identity: defradb identity new diff --git a/docs/website/references/cli/defradb_identity_new.md b/docs/website/references/cli/defradb_identity_new.md index dd8cf0af7b..24a995e8f8 100644 --- a/docs/website/references/cli/defradb_identity_new.md +++ b/docs/website/references/cli/defradb_identity_new.md @@ -6,6 +6,12 @@ Generate a new identity Generate a new identity +The generated identity contains: +- A secp256k1 private key that is a 256-bit big-endian binary-encoded number, +padded to a length of 32 bytes in HEX format. +- A compressed 33-byte secp256k1 public key in HEX format. +- A "did:key" generated from the public key. + Example: generate a new identity: defradb identity new From 58559691472dbb3855d18b6eac8ff6cb061415e8 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Tue, 25 Jun 2024 15:33:10 -0400 Subject: [PATCH 66/78] chore(i): Bump mockery to `v2.43.0` & fix deprecated config (#2776) ## Relevant issue(s) Resolves #2775 ## Description - Remove the deprecated config option that causes errors. - Bump mockery to `2.43.0` ## How has this been tested? 
locally Specify the platform(s) on which this was tested: - WSL2 --- Makefile | 2 +- client/mocks/collection.go | 76 +++++++++++++ client/mocks/db.go | 100 ++++++++++++++++++ datastore/mocks/dag_store.go | 32 ++++++ datastore/mocks/ds_reader_writer.go | 28 +++++ datastore/mocks/results.go | 24 +++++ datastore/mocks/root_store.go | 40 +++++++ datastore/mocks/txn.go | 32 ++++++ internal/db/fetcher/mocks/encoded_document.go | 16 +++ internal/db/fetcher/mocks/fetcher.go | 16 +++ tools/configs/mockery.yaml | 2 - 11 files changed, 365 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 134291390f..efa4748d40 100644 --- a/Makefile +++ b/Makefile @@ -160,7 +160,7 @@ deps\:modules: .PHONY: deps\:mocks deps\:mocks: - go install github.com/vektra/mockery/v2@v2.32.0 + go install github.com/vektra/mockery/v2@v2.43.0 .PHONY: deps\:playground deps\:playground: diff --git a/client/mocks/collection.go b/client/mocks/collection.go index 9e1cf9b654..3b80849661 100644 --- a/client/mocks/collection.go +++ b/client/mocks/collection.go @@ -29,6 +29,10 @@ func (_m *Collection) EXPECT() *Collection_Expecter { func (_m *Collection) Create(ctx context.Context, doc *client.Document) error { ret := _m.Called(ctx, doc) + if len(ret) == 0 { + panic("no return value specified for Create") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { r0 = rf(ctx, doc) @@ -72,6 +76,10 @@ func (_c *Collection_Create_Call) RunAndReturn(run func(context.Context, *client func (_m *Collection) CreateIndex(_a0 context.Context, _a1 client.IndexDescription) (client.IndexDescription, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CreateIndex") + } + var r0 client.IndexDescription var r1 error if rf, ok := ret.Get(0).(func(context.Context, client.IndexDescription) (client.IndexDescription, error)); ok { @@ -125,6 +133,10 @@ func (_c *Collection_CreateIndex_Call) RunAndReturn(run func(context.Context, cl func 
(_m *Collection) CreateMany(ctx context.Context, docs []*client.Document) error { ret := _m.Called(ctx, docs) + if len(ret) == 0 { + panic("no return value specified for CreateMany") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []*client.Document) error); ok { r0 = rf(ctx, docs) @@ -168,6 +180,10 @@ func (_c *Collection_CreateMany_Call) RunAndReturn(run func(context.Context, []* func (_m *Collection) Definition() client.CollectionDefinition { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Definition") + } + var r0 client.CollectionDefinition if rf, ok := ret.Get(0).(func() client.CollectionDefinition); ok { r0 = rf() @@ -209,6 +225,10 @@ func (_c *Collection_Definition_Call) RunAndReturn(run func() client.CollectionD func (_m *Collection) Delete(ctx context.Context, docID client.DocID) (bool, error) { ret := _m.Called(ctx, docID) + if len(ret) == 0 { + panic("no return value specified for Delete") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, client.DocID) (bool, error)); ok { @@ -262,6 +282,10 @@ func (_c *Collection_Delete_Call) RunAndReturn(run func(context.Context, client. 
func (_m *Collection) DeleteWithFilter(ctx context.Context, filter interface{}) (*client.DeleteResult, error) { ret := _m.Called(ctx, filter) + if len(ret) == 0 { + panic("no return value specified for DeleteWithFilter") + } + var r0 *client.DeleteResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, interface{}) (*client.DeleteResult, error)); ok { @@ -317,6 +341,10 @@ func (_c *Collection_DeleteWithFilter_Call) RunAndReturn(run func(context.Contex func (_m *Collection) Description() client.CollectionDescription { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Description") + } + var r0 client.CollectionDescription if rf, ok := ret.Get(0).(func() client.CollectionDescription); ok { r0 = rf() @@ -358,6 +386,10 @@ func (_c *Collection_Description_Call) RunAndReturn(run func() client.Collection func (_m *Collection) DropIndex(ctx context.Context, indexName string) error { ret := _m.Called(ctx, indexName) + if len(ret) == 0 { + panic("no return value specified for DropIndex") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, indexName) @@ -401,6 +433,10 @@ func (_c *Collection_DropIndex_Call) RunAndReturn(run func(context.Context, stri func (_m *Collection) Exists(ctx context.Context, docID client.DocID) (bool, error) { ret := _m.Called(ctx, docID) + if len(ret) == 0 { + panic("no return value specified for Exists") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, client.DocID) (bool, error)); ok { @@ -454,6 +490,10 @@ func (_c *Collection_Exists_Call) RunAndReturn(run func(context.Context, client. 
func (_m *Collection) Get(ctx context.Context, docID client.DocID, showDeleted bool) (*client.Document, error) { ret := _m.Called(ctx, docID, showDeleted) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 *client.Document var r1 error if rf, ok := ret.Get(0).(func(context.Context, client.DocID, bool) (*client.Document, error)); ok { @@ -510,6 +550,10 @@ func (_c *Collection_Get_Call) RunAndReturn(run func(context.Context, client.Doc func (_m *Collection) GetAllDocIDs(ctx context.Context) (<-chan client.DocIDResult, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetAllDocIDs") + } + var r0 <-chan client.DocIDResult var r1 error if rf, ok := ret.Get(0).(func(context.Context) (<-chan client.DocIDResult, error)); ok { @@ -564,6 +608,10 @@ func (_c *Collection_GetAllDocIDs_Call) RunAndReturn(run func(context.Context) ( func (_m *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetIndexes") + } + var r0 []client.IndexDescription var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]client.IndexDescription, error)); ok { @@ -618,6 +666,10 @@ func (_c *Collection_GetIndexes_Call) RunAndReturn(run func(context.Context) ([] func (_m *Collection) ID() uint32 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 uint32 if rf, ok := ret.Get(0).(func() uint32); ok { r0 = rf() @@ -659,6 +711,10 @@ func (_c *Collection_ID_Call) RunAndReturn(run func() uint32) *Collection_ID_Cal func (_m *Collection) Name() immutable.Option[string] { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 immutable.Option[string] if rf, ok := ret.Get(0).(func() immutable.Option[string]); ok { r0 = rf() @@ -700,6 +756,10 @@ func (_c *Collection_Name_Call) RunAndReturn(run func() immutable.Option[string] func (_m 
*Collection) Save(ctx context.Context, doc *client.Document) error { ret := _m.Called(ctx, doc) + if len(ret) == 0 { + panic("no return value specified for Save") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { r0 = rf(ctx, doc) @@ -743,6 +803,10 @@ func (_c *Collection_Save_Call) RunAndReturn(run func(context.Context, *client.D func (_m *Collection) Schema() client.SchemaDescription { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Schema") + } + var r0 client.SchemaDescription if rf, ok := ret.Get(0).(func() client.SchemaDescription); ok { r0 = rf() @@ -784,6 +848,10 @@ func (_c *Collection_Schema_Call) RunAndReturn(run func() client.SchemaDescripti func (_m *Collection) SchemaRoot() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SchemaRoot") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -825,6 +893,10 @@ func (_c *Collection_SchemaRoot_Call) RunAndReturn(run func() string) *Collectio func (_m *Collection) Update(ctx context.Context, docs *client.Document) error { ret := _m.Called(ctx, docs) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { r0 = rf(ctx, docs) @@ -868,6 +940,10 @@ func (_c *Collection_Update_Call) RunAndReturn(run func(context.Context, *client func (_m *Collection) UpdateWithFilter(ctx context.Context, filter interface{}, updater string) (*client.UpdateResult, error) { ret := _m.Called(ctx, filter, updater) + if len(ret) == 0 { + panic("no return value specified for UpdateWithFilter") + } + var r0 *client.UpdateResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) (*client.UpdateResult, error)); ok { diff --git a/client/mocks/db.go b/client/mocks/db.go index c56af31167..396bc5397c 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -37,6 
+37,10 @@ func (_m *DB) EXPECT() *DB_Expecter { func (_m *DB) AddPolicy(ctx context.Context, policy string) (client.AddPolicyResult, error) { ret := _m.Called(ctx, policy) + if len(ret) == 0 { + panic("no return value specified for AddPolicy") + } + var r0 client.AddPolicyResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (client.AddPolicyResult, error)); ok { @@ -90,6 +94,10 @@ func (_c *DB_AddPolicy_Call) RunAndReturn(run func(context.Context, string) (cli func (_m *DB) AddSchema(_a0 context.Context, _a1 string) ([]client.CollectionDescription, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for AddSchema") + } + var r0 []client.CollectionDescription var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.CollectionDescription, error)); ok { @@ -145,6 +153,10 @@ func (_c *DB_AddSchema_Call) RunAndReturn(run func(context.Context, string) ([]c func (_m *DB) AddView(ctx context.Context, gqlQuery string, sdl string, transform immutable.Option[model.Lens]) ([]client.CollectionDefinition, error) { ret := _m.Called(ctx, gqlQuery, sdl, transform) + if len(ret) == 0 { + panic("no return value specified for AddView") + } + var r0 []client.CollectionDefinition var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, immutable.Option[model.Lens]) ([]client.CollectionDefinition, error)); ok { @@ -202,6 +214,10 @@ func (_c *DB_AddView_Call) RunAndReturn(run func(context.Context, string, string func (_m *DB) BasicExport(ctx context.Context, config *client.BackupConfig) error { ret := _m.Called(ctx, config) + if len(ret) == 0 { + panic("no return value specified for BasicExport") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *client.BackupConfig) error); ok { r0 = rf(ctx, config) @@ -245,6 +261,10 @@ func (_c *DB_BasicExport_Call) RunAndReturn(run func(context.Context, *client.Ba func (_m *DB) BasicImport(ctx context.Context, filepath string) error { 
ret := _m.Called(ctx, filepath) + if len(ret) == 0 { + panic("no return value specified for BasicImport") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, filepath) @@ -288,6 +308,10 @@ func (_c *DB_BasicImport_Call) RunAndReturn(run func(context.Context, string) er func (_m *DB) Blockstore() datastore.DAGStore { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Blockstore") + } + var r0 datastore.DAGStore if rf, ok := ret.Get(0).(func() datastore.DAGStore); ok { r0 = rf() @@ -363,6 +387,10 @@ func (_c *DB_Close_Call) RunAndReturn(run func()) *DB_Close_Call { func (_m *DB) Events() *event.Bus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Events") + } + var r0 *event.Bus if rf, ok := ret.Get(0).(func() *event.Bus); ok { r0 = rf() @@ -406,6 +434,10 @@ func (_c *DB_Events_Call) RunAndReturn(run func() *event.Bus) *DB_Events_Call { func (_m *DB) ExecRequest(ctx context.Context, request string) *client.RequestResult { ret := _m.Called(ctx, request) + if len(ret) == 0 { + panic("no return value specified for ExecRequest") + } + var r0 *client.RequestResult if rf, ok := ret.Get(0).(func(context.Context, string) *client.RequestResult); ok { r0 = rf(ctx, request) @@ -451,6 +483,10 @@ func (_c *DB_ExecRequest_Call) RunAndReturn(run func(context.Context, string) *c func (_m *DB) GetAllIndexes(_a0 context.Context) (map[string][]client.IndexDescription, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetAllIndexes") + } + var r0 map[string][]client.IndexDescription var r1 error if rf, ok := ret.Get(0).(func(context.Context) (map[string][]client.IndexDescription, error)); ok { @@ -505,6 +541,10 @@ func (_c *DB_GetAllIndexes_Call) RunAndReturn(run func(context.Context) (map[str func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + 
panic("no return value specified for GetCollectionByName") + } + var r0 client.Collection var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { @@ -560,6 +600,10 @@ func (_c *DB_GetCollectionByName_Call) RunAndReturn(run func(context.Context, st func (_m *DB) GetCollections(_a0 context.Context, _a1 client.CollectionFetchOptions) ([]client.Collection, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetCollections") + } + var r0 []client.Collection var r1 error if rf, ok := ret.Get(0).(func(context.Context, client.CollectionFetchOptions) ([]client.Collection, error)); ok { @@ -615,6 +659,10 @@ func (_c *DB_GetCollections_Call) RunAndReturn(run func(context.Context, client. func (_m *DB) GetSchemaByVersionID(_a0 context.Context, _a1 string) (client.SchemaDescription, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetSchemaByVersionID") + } + var r0 client.SchemaDescription var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (client.SchemaDescription, error)); ok { @@ -668,6 +716,10 @@ func (_c *DB_GetSchemaByVersionID_Call) RunAndReturn(run func(context.Context, s func (_m *DB) GetSchemas(_a0 context.Context, _a1 client.SchemaFetchOptions) ([]client.SchemaDescription, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetSchemas") + } + var r0 []client.SchemaDescription var r1 error if rf, ok := ret.Get(0).(func(context.Context, client.SchemaFetchOptions) ([]client.SchemaDescription, error)); ok { @@ -723,6 +775,10 @@ func (_c *DB_GetSchemas_Call) RunAndReturn(run func(context.Context, client.Sche func (_m *DB) Headstore() go_datastore.Read { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Headstore") + } + var r0 go_datastore.Read if rf, ok := ret.Get(0).(func() go_datastore.Read); ok { r0 = rf() @@ -766,6 +822,10 @@ func (_c 
*DB_Headstore_Call) RunAndReturn(run func() go_datastore.Read) *DB_Head func (_m *DB) LensRegistry() client.LensRegistry { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LensRegistry") + } + var r0 client.LensRegistry if rf, ok := ret.Get(0).(func() client.LensRegistry); ok { r0 = rf() @@ -809,6 +869,10 @@ func (_c *DB_LensRegistry_Call) RunAndReturn(run func() client.LensRegistry) *DB func (_m *DB) MaxTxnRetries() int { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for MaxTxnRetries") + } + var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() @@ -850,6 +914,10 @@ func (_c *DB_MaxTxnRetries_Call) RunAndReturn(run func() int) *DB_MaxTxnRetries_ func (_m *DB) NewConcurrentTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for NewConcurrentTxn") + } + var r0 datastore.Txn var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { @@ -905,6 +973,10 @@ func (_c *DB_NewConcurrentTxn_Call) RunAndReturn(run func(context.Context, bool) func (_m *DB) NewTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for NewTxn") + } + var r0 datastore.Txn var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { @@ -960,6 +1032,10 @@ func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastor func (_m *DB) PatchCollection(_a0 context.Context, _a1 string) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for PatchCollection") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(_a0, _a1) @@ -1003,6 +1079,10 @@ func (_c *DB_PatchCollection_Call) RunAndReturn(run func(context.Context, string func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 
immutable.Option[model.Lens], _a3 bool) error { ret := _m.Called(_a0, _a1, _a2, _a3) + if len(ret) == 0 { + panic("no return value specified for PatchSchema") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, immutable.Option[model.Lens], bool) error); ok { r0 = rf(_a0, _a1, _a2, _a3) @@ -1048,6 +1128,10 @@ func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, im func (_m *DB) Peerstore() datastore.DSBatching { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Peerstore") + } + var r0 datastore.DSBatching if rf, ok := ret.Get(0).(func() datastore.DSBatching); ok { r0 = rf() @@ -1091,6 +1175,10 @@ func (_c *DB_Peerstore_Call) RunAndReturn(run func() datastore.DSBatching) *DB_P func (_m *DB) PrintDump(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for PrintDump") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -1133,6 +1221,10 @@ func (_c *DB_PrintDump_Call) RunAndReturn(run func(context.Context) error) *DB_P func (_m *DB) Root() datastore.RootStore { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Root") + } + var r0 datastore.RootStore if rf, ok := ret.Get(0).(func() datastore.RootStore); ok { r0 = rf() @@ -1176,6 +1268,10 @@ func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Ca func (_m *DB) SetActiveSchemaVersion(_a0 context.Context, _a1 string) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SetActiveSchemaVersion") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(_a0, _a1) @@ -1219,6 +1315,10 @@ func (_c *DB_SetActiveSchemaVersion_Call) RunAndReturn(run func(context.Context, func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value 
specified for SetMigration") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, client.LensConfig) error); ok { r0 = rf(_a0, _a1) diff --git a/datastore/mocks/dag_store.go b/datastore/mocks/dag_store.go index a9ba9e2af8..f6fe123a80 100644 --- a/datastore/mocks/dag_store.go +++ b/datastore/mocks/dag_store.go @@ -30,6 +30,10 @@ func (_m *DAGStore) EXPECT() *DAGStore_Expecter { func (_m *DAGStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for AllKeysChan") + } + var r0 <-chan cid.Cid var r1 error if rf, ok := ret.Get(0).(func(context.Context) (<-chan cid.Cid, error)); ok { @@ -84,6 +88,10 @@ func (_c *DAGStore_AllKeysChan_Call) RunAndReturn(run func(context.Context) (<-c func (_m *DAGStore) AsIPLDStorage() datastore.IPLDStorage { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for AsIPLDStorage") + } + var r0 datastore.IPLDStorage if rf, ok := ret.Get(0).(func() datastore.IPLDStorage); ok { r0 = rf() @@ -127,6 +135,10 @@ func (_c *DAGStore_AsIPLDStorage_Call) RunAndReturn(run func() datastore.IPLDSto func (_m *DAGStore) DeleteBlock(_a0 context.Context, _a1 cid.Cid) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for DeleteBlock") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) error); ok { r0 = rf(_a0, _a1) @@ -170,6 +182,10 @@ func (_c *DAGStore_DeleteBlock_Call) RunAndReturn(run func(context.Context, cid. 
func (_m *DAGStore) Get(_a0 context.Context, _a1 cid.Cid) (blocks.Block, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 blocks.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blocks.Block, error)); ok { @@ -225,6 +241,10 @@ func (_c *DAGStore_Get_Call) RunAndReturn(run func(context.Context, cid.Cid) (bl func (_m *DAGStore) GetSize(_a0 context.Context, _a1 cid.Cid) (int, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetSize") + } + var r0 int var r1 error if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (int, error)); ok { @@ -278,6 +298,10 @@ func (_c *DAGStore_GetSize_Call) RunAndReturn(run func(context.Context, cid.Cid) func (_m *DAGStore) Has(_a0 context.Context, _a1 cid.Cid) (bool, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Has") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (bool, error)); ok { @@ -364,6 +388,10 @@ func (_c *DAGStore_HashOnRead_Call) RunAndReturn(run func(bool)) *DAGStore_HashO func (_m *DAGStore) Put(_a0 context.Context, _a1 blocks.Block) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Put") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, blocks.Block) error); ok { r0 = rf(_a0, _a1) @@ -407,6 +435,10 @@ func (_c *DAGStore_Put_Call) RunAndReturn(run func(context.Context, blocks.Block func (_m *DAGStore) PutMany(_a0 context.Context, _a1 []blocks.Block) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for PutMany") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []blocks.Block) error); ok { r0 = rf(_a0, _a1) diff --git a/datastore/mocks/ds_reader_writer.go b/datastore/mocks/ds_reader_writer.go index 865086c697..989521521e 100644 --- a/datastore/mocks/ds_reader_writer.go +++ 
b/datastore/mocks/ds_reader_writer.go @@ -31,6 +31,10 @@ func (_m *DSReaderWriter) EXPECT() *DSReaderWriter_Expecter { func (_m *DSReaderWriter) Delete(ctx context.Context, key datastore.Key) error { ret := _m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for Delete") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { r0 = rf(ctx, key) @@ -74,6 +78,10 @@ func (_c *DSReaderWriter_Delete_Call) RunAndReturn(run func(context.Context, dat func (_m *DSReaderWriter) Get(ctx context.Context, key datastore.Key) ([]byte, error) { ret := _m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok { @@ -129,6 +137,10 @@ func (_c *DSReaderWriter_Get_Call) RunAndReturn(run func(context.Context, datast func (_m *DSReaderWriter) GetIterator(q query.Query) (iterable.Iterator, error) { ret := _m.Called(q) + if len(ret) == 0 { + panic("no return value specified for GetIterator") + } + var r0 iterable.Iterator var r1 error if rf, ok := ret.Get(0).(func(query.Query) (iterable.Iterator, error)); ok { @@ -183,6 +195,10 @@ func (_c *DSReaderWriter_GetIterator_Call) RunAndReturn(run func(query.Query) (i func (_m *DSReaderWriter) GetSize(ctx context.Context, key datastore.Key) (int, error) { ret := _m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for GetSize") + } + var r0 int var r1 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok { @@ -236,6 +252,10 @@ func (_c *DSReaderWriter_GetSize_Call) RunAndReturn(run func(context.Context, da func (_m *DSReaderWriter) Has(ctx context.Context, key datastore.Key) (bool, error) { ret := _m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for Has") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok 
{ @@ -289,6 +309,10 @@ func (_c *DSReaderWriter_Has_Call) RunAndReturn(run func(context.Context, datast func (_m *DSReaderWriter) Put(ctx context.Context, key datastore.Key, value []byte) error { ret := _m.Called(ctx, key, value) + if len(ret) == 0 { + panic("no return value specified for Put") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok { r0 = rf(ctx, key, value) @@ -333,6 +357,10 @@ func (_c *DSReaderWriter_Put_Call) RunAndReturn(run func(context.Context, datast func (_m *DSReaderWriter) Query(ctx context.Context, q query.Query) (query.Results, error) { ret := _m.Called(ctx, q) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 query.Results var r1 error if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok { diff --git a/datastore/mocks/results.go b/datastore/mocks/results.go index e1fee8f859..d1f68949a7 100644 --- a/datastore/mocks/results.go +++ b/datastore/mocks/results.go @@ -26,6 +26,10 @@ func (_m *Results) EXPECT() *Results_Expecter { func (_m *Results) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -67,6 +71,10 @@ func (_c *Results_Close_Call) RunAndReturn(run func() error) *Results_Close_Call func (_m *Results) Next() <-chan query.Result { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Next") + } + var r0 <-chan query.Result if rf, ok := ret.Get(0).(func() <-chan query.Result); ok { r0 = rf() @@ -110,6 +118,10 @@ func (_c *Results_Next_Call) RunAndReturn(run func() <-chan query.Result) *Resul func (_m *Results) NextSync() (query.Result, bool) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NextSync") + } + var r0 query.Result var r1 bool if rf, ok := ret.Get(0).(func() (query.Result, bool)); ok { @@ -161,6 +173,10 @@ func (_c 
*Results_NextSync_Call) RunAndReturn(run func() (query.Result, bool)) * func (_m *Results) Process() goprocess.Process { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Process") + } + var r0 goprocess.Process if rf, ok := ret.Get(0).(func() goprocess.Process); ok { r0 = rf() @@ -204,6 +220,10 @@ func (_c *Results_Process_Call) RunAndReturn(run func() goprocess.Process) *Resu func (_m *Results) Query() query.Query { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 query.Query if rf, ok := ret.Get(0).(func() query.Query); ok { r0 = rf() @@ -245,6 +265,10 @@ func (_c *Results_Query_Call) RunAndReturn(run func() query.Query) *Results_Quer func (_m *Results) Rest() ([]query.Entry, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Rest") + } + var r0 []query.Entry var r1 error if rf, ok := ret.Get(0).(func() ([]query.Entry, error)); ok { diff --git a/datastore/mocks/root_store.go b/datastore/mocks/root_store.go index 836a059f68..94d2694721 100644 --- a/datastore/mocks/root_store.go +++ b/datastore/mocks/root_store.go @@ -29,6 +29,10 @@ func (_m *RootStore) EXPECT() *RootStore_Expecter { func (_m *RootStore) Batch(ctx context.Context) (datastore.Batch, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Batch") + } + var r0 datastore.Batch var r1 error if rf, ok := ret.Get(0).(func(context.Context) (datastore.Batch, error)); ok { @@ -83,6 +87,10 @@ func (_c *RootStore_Batch_Call) RunAndReturn(run func(context.Context) (datastor func (_m *RootStore) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -124,6 +132,10 @@ func (_c *RootStore_Close_Call) RunAndReturn(run func() error) *RootStore_Close_ func (_m *RootStore) Delete(ctx context.Context, key datastore.Key) error { ret := 
_m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for Delete") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { r0 = rf(ctx, key) @@ -167,6 +179,10 @@ func (_c *RootStore_Delete_Call) RunAndReturn(run func(context.Context, datastor func (_m *RootStore) Get(ctx context.Context, key datastore.Key) ([]byte, error) { ret := _m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok { @@ -222,6 +238,10 @@ func (_c *RootStore_Get_Call) RunAndReturn(run func(context.Context, datastore.K func (_m *RootStore) GetSize(ctx context.Context, key datastore.Key) (int, error) { ret := _m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for GetSize") + } + var r0 int var r1 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok { @@ -275,6 +295,10 @@ func (_c *RootStore_GetSize_Call) RunAndReturn(run func(context.Context, datasto func (_m *RootStore) Has(ctx context.Context, key datastore.Key) (bool, error) { ret := _m.Called(ctx, key) + if len(ret) == 0 { + panic("no return value specified for Has") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok { @@ -328,6 +352,10 @@ func (_c *RootStore_Has_Call) RunAndReturn(run func(context.Context, datastore.K func (_m *RootStore) NewTransaction(ctx context.Context, readOnly bool) (datastore.Txn, error) { ret := _m.Called(ctx, readOnly) + if len(ret) == 0 { + panic("no return value specified for NewTransaction") + } + var r0 datastore.Txn var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { @@ -383,6 +411,10 @@ func (_c *RootStore_NewTransaction_Call) RunAndReturn(run func(context.Context, func (_m *RootStore) Put(ctx context.Context, key datastore.Key, value []byte) error 
{ ret := _m.Called(ctx, key, value) + if len(ret) == 0 { + panic("no return value specified for Put") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok { r0 = rf(ctx, key, value) @@ -427,6 +459,10 @@ func (_c *RootStore_Put_Call) RunAndReturn(run func(context.Context, datastore.K func (_m *RootStore) Query(ctx context.Context, q query.Query) (query.Results, error) { ret := _m.Called(ctx, q) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 query.Results var r1 error if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok { @@ -482,6 +518,10 @@ func (_c *RootStore_Query_Call) RunAndReturn(run func(context.Context, query.Que func (_m *RootStore) Sync(ctx context.Context, prefix datastore.Key) error { ret := _m.Called(ctx, prefix) + if len(ret) == 0 { + panic("no return value specified for Sync") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { r0 = rf(ctx, prefix) diff --git a/datastore/mocks/txn.go b/datastore/mocks/txn.go index 711464dc12..7c9872dfb2 100644 --- a/datastore/mocks/txn.go +++ b/datastore/mocks/txn.go @@ -26,6 +26,10 @@ func (_m *Txn) EXPECT() *Txn_Expecter { func (_m *Txn) Commit(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -68,6 +72,10 @@ func (_c *Txn_Commit_Call) RunAndReturn(run func(context.Context) error) *Txn_Co func (_m *Txn) DAGstore() datastore.DAGStore { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for DAGstore") + } + var r0 datastore.DAGStore if rf, ok := ret.Get(0).(func() datastore.DAGStore); ok { r0 = rf() @@ -111,6 +119,10 @@ func (_c *Txn_DAGstore_Call) RunAndReturn(run func() datastore.DAGStore) *Txn_DA func (_m *Txn) Datastore() datastore.DSReaderWriter { ret := _m.Called() + if 
len(ret) == 0 { + panic("no return value specified for Datastore") + } + var r0 datastore.DSReaderWriter if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { r0 = rf() @@ -187,6 +199,10 @@ func (_c *Txn_Discard_Call) RunAndReturn(run func(context.Context)) *Txn_Discard func (_m *Txn) Headstore() datastore.DSReaderWriter { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Headstore") + } + var r0 datastore.DSReaderWriter if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { r0 = rf() @@ -230,6 +246,10 @@ func (_c *Txn_Headstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) func (_m *Txn) ID() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -469,6 +489,10 @@ func (_c *Txn_OnSuccessAsync_Call) RunAndReturn(run func(func())) *Txn_OnSuccess func (_m *Txn) Peerstore() datastore.DSBatching { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Peerstore") + } + var r0 datastore.DSBatching if rf, ok := ret.Get(0).(func() datastore.DSBatching); ok { r0 = rf() @@ -512,6 +536,10 @@ func (_c *Txn_Peerstore_Call) RunAndReturn(run func() datastore.DSBatching) *Txn func (_m *Txn) Rootstore() datastore.DSReaderWriter { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Rootstore") + } + var r0 datastore.DSReaderWriter if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { r0 = rf() @@ -555,6 +583,10 @@ func (_c *Txn_Rootstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) func (_m *Txn) Systemstore() datastore.DSReaderWriter { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Systemstore") + } + var r0 datastore.DSReaderWriter if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { r0 = rf() diff --git a/internal/db/fetcher/mocks/encoded_document.go 
b/internal/db/fetcher/mocks/encoded_document.go index 6a517e19dd..1905908e11 100644 --- a/internal/db/fetcher/mocks/encoded_document.go +++ b/internal/db/fetcher/mocks/encoded_document.go @@ -25,6 +25,10 @@ func (_m *EncodedDocument) EXPECT() *EncodedDocument_Expecter { func (_m *EncodedDocument) ID() []byte { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 []byte if rf, ok := ret.Get(0).(func() []byte); ok { r0 = rf() @@ -68,6 +72,10 @@ func (_c *EncodedDocument_ID_Call) RunAndReturn(run func() []byte) *EncodedDocum func (_m *EncodedDocument) Properties(onlyFilterProps bool) (map[client.FieldDefinition]interface{}, error) { ret := _m.Called(onlyFilterProps) + if len(ret) == 0 { + panic("no return value specified for Properties") + } + var r0 map[client.FieldDefinition]interface{} var r1 error if rf, ok := ret.Get(0).(func(bool) (map[client.FieldDefinition]interface{}, error)); ok { @@ -154,6 +162,10 @@ func (_c *EncodedDocument_Reset_Call) RunAndReturn(run func()) *EncodedDocument_ func (_m *EncodedDocument) SchemaVersionID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SchemaVersionID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -195,6 +207,10 @@ func (_c *EncodedDocument_SchemaVersionID_Call) RunAndReturn(run func() string) func (_m *EncodedDocument) Status() client.DocumentStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 client.DocumentStatus if rf, ok := ret.Get(0).(func() client.DocumentStatus); ok { r0 = rf() diff --git a/internal/db/fetcher/mocks/fetcher.go b/internal/db/fetcher/mocks/fetcher.go index b60385d110..2dac3b0079 100644 --- a/internal/db/fetcher/mocks/fetcher.go +++ b/internal/db/fetcher/mocks/fetcher.go @@ -40,6 +40,10 @@ func (_m *Fetcher) EXPECT() *Fetcher_Expecter { func (_m *Fetcher) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no 
return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -81,6 +85,10 @@ func (_c *Fetcher_Close_Call) RunAndReturn(run func() error) *Fetcher_Close_Call func (_m *Fetcher) FetchNext(ctx context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for FetchNext") + } + var r0 fetcher.EncodedDocument var r1 fetcher.ExecInfo var r2 error @@ -142,6 +150,10 @@ func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetche func (_m *Fetcher) Init(ctx context.Context, _a1 immutable.Option[identity.Identity], txn datastore.Txn, _a3 immutable.Option[acp.ACP], col client.Collection, fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { ret := _m.Called(ctx, _a1, txn, _a3, col, fields, filter, docmapper, reverse, showDeleted) + if len(ret) == 0 { + panic("no return value specified for Init") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, immutable.Option[identity.Identity], datastore.Txn, immutable.Option[acp.ACP], client.Collection, []client.FieldDefinition, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { r0 = rf(ctx, _a1, txn, _a3, col, fields, filter, docmapper, reverse, showDeleted) @@ -193,6 +205,10 @@ func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, immutable.Op func (_m *Fetcher) Start(ctx context.Context, spans core.Spans) error { ret := _m.Called(ctx, spans) + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, core.Spans) error); ok { r0 = rf(ctx, spans) diff --git a/tools/configs/mockery.yaml b/tools/configs/mockery.yaml index 9216c32fa8..451ae55771 100644 --- a/tools/configs/mockery.yaml +++ b/tools/configs/mockery.yaml @@ -4,8 +4,6 @@ with-expecter: true quiet: False -keeptree: True - 
disable-version-string: True log-level: "info" From c6c23736b0cabe78637979927e0dbe604230f759 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Wed, 26 Jun 2024 10:20:56 -0400 Subject: [PATCH 67/78] fix: Remove shared mutable state between database instances (#2777) ## Relevant issue(s) Resolves #2774 ## Description Removes shared mutable GQL state between database instances. These global static GQL type variables are actually mutated by the GQL library we use at different points during database init, and schema update. This means that when multiple Defra instances are created in the same process they were sharing state. As well as just being an inherently bad thing, this also prevented us from running tests within packages in parallel using `t.Parallel()` as the race detector would complain. --- internal/request/graphql/schema/collection.go | 2 +- .../request/graphql/schema/descriptions.go | 4 +- internal/request/graphql/schema/generate.go | 12 +- internal/request/graphql/schema/manager.go | 100 ++- .../request/graphql/schema/manager_test.go | 11 - internal/request/graphql/schema/types/base.go | 684 +++++++++--------- .../request/graphql/schema/types/commits.go | 160 ++-- .../request/graphql/schema/types/scalars.go | 76 +- .../graphql/schema/types/scalars_test.go | 12 +- .../request/graphql/schema/types/types.go | 82 ++- tests/integration/schema/client_test.go | 2 +- 11 files changed, 606 insertions(+), 539 deletions(-) diff --git a/internal/request/graphql/schema/collection.go b/internal/request/graphql/schema/collection.go index 0416504ef4..7c4e44593b 100644 --- a/internal/request/graphql/schema/collection.go +++ b/internal/request/graphql/schema/collection.go @@ -482,7 +482,7 @@ func setCRDTType(field *ast.FieldDefinition, kind client.FieldKind) (client.CTyp switch arg.Name.Value { case "type": cTypeString := arg.Value.GetValue().(string) - cType, validCRDTEnum := types.CRDTEnum.ParseValue(cTypeString).(client.CType) + cType, validCRDTEnum := 
types.CRDTEnum().ParseValue(cTypeString).(client.CType) if !validCRDTEnum { return 0, client.NewErrInvalidCRDTType(field.Name.Value, cTypeString) } diff --git a/internal/request/graphql/schema/descriptions.go b/internal/request/graphql/schema/descriptions.go index 945f4e8bdf..6f89932c0a 100644 --- a/internal/request/graphql/schema/descriptions.go +++ b/internal/request/graphql/schema/descriptions.go @@ -33,8 +33,8 @@ var ( client.FieldKind_NILLABLE_STRING: gql.String, client.FieldKind_STRING_ARRAY: gql.NewList(gql.NewNonNull(gql.String)), client.FieldKind_NILLABLE_STRING_ARRAY: gql.NewList(gql.String), - client.FieldKind_NILLABLE_BLOB: schemaTypes.BlobScalarType, - client.FieldKind_NILLABLE_JSON: schemaTypes.JSONScalarType, + client.FieldKind_NILLABLE_BLOB: schemaTypes.BlobScalarType(), + client.FieldKind_NILLABLE_JSON: schemaTypes.JSONScalarType(), } defaultCRDTForFieldKind = map[client.FieldKind]client.CType{ diff --git a/internal/request/graphql/schema/generate.go b/internal/request/graphql/schema/generate.go index 1f489daf47..5fd6b5ecf6 100644 --- a/internal/request/graphql/schema/generate.go +++ b/internal/request/graphql/schema/generate.go @@ -179,7 +179,7 @@ func (g *Generator) generate(ctx context.Context, collections []client.Collectio } } - appendCommitChildGroupField() + g.appendCommitChildGroupField() // resolve types if err := g.manager.ResolveTypes(); err != nil { @@ -500,7 +500,7 @@ func (g *Generator) buildTypes( // add _version field fields[request.VersionFieldName] = &gql.Field{ Description: versionFieldDescription, - Type: gql.NewList(schemaTypes.CommitObject), + Type: gql.NewList(g.manager.schema.TypeMap()[request.CommitTypeName]), } // add _deleted field @@ -987,11 +987,13 @@ func (g *Generator) genNumericAggregateBaseArgInputs(obj *gql.Object) *gql.Input }) } -func appendCommitChildGroupField() { - schemaTypes.CommitObject.Fields()[request.GroupFieldName] = &gql.FieldDefinition{ +func (g *Generator) appendCommitChildGroupField() { + 
commitObject := g.manager.schema.TypeMap()[request.CommitTypeName] + + commitObject.(*gql.Object).Fields()[request.GroupFieldName] = &gql.FieldDefinition{ Name: request.GroupFieldName, Description: groupFieldDescription, - Type: gql.NewList(schemaTypes.CommitObject), + Type: gql.NewList(commitObject), } } diff --git a/internal/request/graphql/schema/manager.go b/internal/request/graphql/schema/manager.go index 02dfaade86..fee626b45c 100644 --- a/internal/request/graphql/schema/manager.go +++ b/internal/request/graphql/schema/manager.go @@ -27,11 +27,27 @@ type SchemaManager struct { // with a new default type map func NewSchemaManager() (*SchemaManager, error) { sm := &SchemaManager{} + + orderEnum := schemaTypes.OrderingEnum() + crdtEnum := schemaTypes.CRDTEnum() + explainEnum := schemaTypes.ExplainEnum() + + commitLinkObject := schemaTypes.CommitLinkObject() + commitObject := schemaTypes.CommitObject(commitLinkObject) + commitsOrderArg := schemaTypes.CommitsOrderArg(orderEnum) + schema, err := gql.NewSchema(gql.SchemaConfig{ - Types: defaultTypes(), - Query: defaultQueryType(), + Types: defaultTypes( + commitObject, + commitLinkObject, + commitsOrderArg, + orderEnum, + crdtEnum, + explainEnum, + ), + Query: defaultQueryType(commitObject, commitsOrderArg), Mutation: defaultMutationType(), - Directives: defaultDirectivesType(), + Directives: defaultDirectivesType(crdtEnum, explainEnum, orderEnum), }) if err != nil { return sm, err @@ -80,7 +96,10 @@ func (s *SchemaManager) ResolveTypes() error { } // @todo: Use a better default Query type -func defaultQueryType() *gql.Object { +func defaultQueryType(commitObject *gql.Object, commitsOrderArg *gql.InputObject) *gql.Object { + queryCommits := schemaTypes.QueryCommits(commitObject, commitsOrderArg) + queryLatestCommits := schemaTypes.QueryLatestCommits(commitObject) + return gql.NewObject(gql.ObjectConfig{ Name: "Query", Fields: gql.Fields{ @@ -90,8 +109,8 @@ func defaultQueryType() *gql.Object { }, // database API 
queries - schemaTypes.QueryCommits.Name: schemaTypes.QueryCommits, - schemaTypes.QueryLatestCommits.Name: schemaTypes.QueryLatestCommits, + queryCommits.Name: queryCommits, + queryLatestCommits.Name: queryLatestCommits, }, }) } @@ -109,15 +128,19 @@ func defaultMutationType() *gql.Object { } // default directives type. -func defaultDirectivesType() []*gql.Directive { +func defaultDirectivesType( + crdtEnum *gql.Enum, + explainEnum *gql.Enum, + orderEnum *gql.Enum, +) []*gql.Directive { return []*gql.Directive{ - schemaTypes.CRDTFieldDirective, - schemaTypes.ExplainDirective, - schemaTypes.PolicyDirective, - schemaTypes.IndexDirective, - schemaTypes.IndexFieldDirective, - schemaTypes.PrimaryDirective, - schemaTypes.RelationDirective, + schemaTypes.CRDTFieldDirective(crdtEnum), + schemaTypes.ExplainDirective(explainEnum), + schemaTypes.PolicyDirective(), + schemaTypes.IndexDirective(orderEnum), + schemaTypes.IndexFieldDirective(orderEnum), + schemaTypes.PrimaryDirective(), + schemaTypes.RelationDirective(), } } @@ -135,7 +158,14 @@ func inlineArrayTypes() []gql.Type { } // default type map includes all the native scalar types -func defaultTypes() []gql.Type { +func defaultTypes( + commitObject *gql.Object, + commitLinkObject *gql.Object, + commitsOrderArg *gql.InputObject, + orderEnum *gql.Enum, + crdtEnum *gql.Enum, + explainEnum *gql.Enum, +) []gql.Type { return []gql.Type{ // Base Scalar types gql.Boolean, @@ -146,31 +176,31 @@ func defaultTypes() []gql.Type { gql.String, // Custom Scalar types - schemaTypes.BlobScalarType, - schemaTypes.JSONScalarType, + schemaTypes.BlobScalarType(), + schemaTypes.JSONScalarType(), // Base Query types // Sort/Order enum - schemaTypes.OrderingEnum, + orderEnum, // Filter scalar blocks - schemaTypes.BooleanOperatorBlock, - schemaTypes.NotNullBooleanOperatorBlock, - schemaTypes.DateTimeOperatorBlock, - schemaTypes.FloatOperatorBlock, - schemaTypes.NotNullFloatOperatorBlock, - schemaTypes.IdOperatorBlock, - 
schemaTypes.IntOperatorBlock, - schemaTypes.NotNullIntOperatorBlock, - schemaTypes.StringOperatorBlock, - schemaTypes.NotNullstringOperatorBlock, - - schemaTypes.CommitsOrderArg, - schemaTypes.CommitLinkObject, - schemaTypes.CommitObject, - - schemaTypes.CRDTEnum, - schemaTypes.ExplainEnum, + schemaTypes.BooleanOperatorBlock(), + schemaTypes.NotNullBooleanOperatorBlock(), + schemaTypes.DateTimeOperatorBlock(), + schemaTypes.FloatOperatorBlock(), + schemaTypes.NotNullFloatOperatorBlock(), + schemaTypes.IdOperatorBlock(), + schemaTypes.IntOperatorBlock(), + schemaTypes.NotNullIntOperatorBlock(), + schemaTypes.StringOperatorBlock(), + schemaTypes.NotNullstringOperatorBlock(), + + commitsOrderArg, + commitLinkObject, + commitObject, + + crdtEnum, + explainEnum, } } diff --git a/internal/request/graphql/schema/manager_test.go b/internal/request/graphql/schema/manager_test.go index e4c9a3922e..e6c1be3828 100644 --- a/internal/request/graphql/schema/manager_test.go +++ b/internal/request/graphql/schema/manager_test.go @@ -21,17 +21,6 @@ func Test_SchemaManager_NewNoErrs(t *testing.T) { assert.NoError(t, err, "NewSchemaManager returned an error") } -func Test_SchemaManager_HasDefaultTypes(t *testing.T) { - s, err := NewSchemaManager() - assert.NoError(t, err, "NewSchemaManager returned an error") - - tm := s.schema.TypeMap() - for _, ty := range defaultTypes() { - _, ok := tm[ty.Name()] - assert.True(t, ok, "TypeMap missing default type %s", ty.Name()) - } -} - func Test_SchemaManager_ResolveTypes(t *testing.T) { s, _ := NewSchemaManager() err := s.ResolveTypes() diff --git a/internal/request/graphql/schema/types/base.go b/internal/request/graphql/schema/types/base.go index 83aa11c55d..ca4c8f2372 100644 --- a/internal/request/graphql/schema/types/base.go +++ b/internal/request/graphql/schema/types/base.go @@ -15,353 +15,373 @@ import ( ) // BooleanOperatorBlock filter block for boolean types. 
-var BooleanOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "BooleanOperatorBlock", - Description: booleanOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.Boolean, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.Boolean, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.Boolean), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.Boolean), - }, - }, -}) +func BooleanOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "BooleanOperatorBlock", + Description: booleanOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.Boolean, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.Boolean, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.Boolean), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.Boolean), + }, + }, + }) +} // NotNullBooleanOperatorBlock filter block for boolean! types. 
-var NotNullBooleanOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "NotNullBooleanOperatorBlock", - Description: notNullBooleanOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.Boolean, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.Boolean, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.Boolean)), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.Boolean)), - }, - }, -}) +func NotNullBooleanOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "NotNullBooleanOperatorBlock", + Description: notNullBooleanOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.Boolean, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.Boolean, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.Boolean)), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.Boolean)), + }, + }, + }) +} // DateTimeOperatorBlock filter block for DateTime types. 
-var DateTimeOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "DateTimeOperatorBlock", - Description: dateTimeOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.DateTime, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.DateTime, - }, - "_gt": &gql.InputObjectFieldConfig{ - Description: gtOperatorDescription, - Type: gql.DateTime, - }, - "_ge": &gql.InputObjectFieldConfig{ - Description: geOperatorDescription, - Type: gql.DateTime, - }, - "_lt": &gql.InputObjectFieldConfig{ - Description: ltOperatorDescription, - Type: gql.DateTime, - }, - "_le": &gql.InputObjectFieldConfig{ - Description: leOperatorDescription, - Type: gql.DateTime, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.DateTime), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.DateTime), - }, - }, -}) +func DateTimeOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "DateTimeOperatorBlock", + Description: dateTimeOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.DateTime, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.DateTime, + }, + "_gt": &gql.InputObjectFieldConfig{ + Description: gtOperatorDescription, + Type: gql.DateTime, + }, + "_ge": &gql.InputObjectFieldConfig{ + Description: geOperatorDescription, + Type: gql.DateTime, + }, + "_lt": &gql.InputObjectFieldConfig{ + Description: ltOperatorDescription, + Type: gql.DateTime, + }, + "_le": &gql.InputObjectFieldConfig{ + Description: leOperatorDescription, + Type: gql.DateTime, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: 
gql.NewList(gql.DateTime), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.DateTime), + }, + }, + }) +} // FloatOperatorBlock filter block for Float types. -var FloatOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "FloatOperatorBlock", - Description: floatOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.Float, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.Float, - }, - "_gt": &gql.InputObjectFieldConfig{ - Description: gtOperatorDescription, - Type: gql.Float, - }, - "_ge": &gql.InputObjectFieldConfig{ - Description: geOperatorDescription, - Type: gql.Float, - }, - "_lt": &gql.InputObjectFieldConfig{ - Description: ltOperatorDescription, - Type: gql.Float, - }, - "_le": &gql.InputObjectFieldConfig{ - Description: leOperatorDescription, - Type: gql.Float, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.Float), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.Float), - }, - }, -}) +func FloatOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "FloatOperatorBlock", + Description: floatOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.Float, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.Float, + }, + "_gt": &gql.InputObjectFieldConfig{ + Description: gtOperatorDescription, + Type: gql.Float, + }, + "_ge": &gql.InputObjectFieldConfig{ + Description: geOperatorDescription, + Type: gql.Float, + }, + "_lt": &gql.InputObjectFieldConfig{ + Description: ltOperatorDescription, + Type: gql.Float, + }, + "_le": &gql.InputObjectFieldConfig{ + 
Description: leOperatorDescription, + Type: gql.Float, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.Float), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.Float), + }, + }, + }) +} // NotNullFloatOperatorBlock filter block for Float! types. -var NotNullFloatOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "NotNullFloatOperatorBlock", - Description: notNullFloatOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.Float, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.Float, - }, - "_gt": &gql.InputObjectFieldConfig{ - Description: gtOperatorDescription, - Type: gql.Float, - }, - "_ge": &gql.InputObjectFieldConfig{ - Description: geOperatorDescription, - Type: gql.Float, - }, - "_lt": &gql.InputObjectFieldConfig{ - Description: ltOperatorDescription, - Type: gql.Float, - }, - "_le": &gql.InputObjectFieldConfig{ - Description: leOperatorDescription, - Type: gql.Float, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.Float)), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.Float)), - }, - }, -}) +func NotNullFloatOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "NotNullFloatOperatorBlock", + Description: notNullFloatOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.Float, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.Float, + }, + "_gt": &gql.InputObjectFieldConfig{ + Description: gtOperatorDescription, + Type: gql.Float, + }, + "_ge": 
&gql.InputObjectFieldConfig{ + Description: geOperatorDescription, + Type: gql.Float, + }, + "_lt": &gql.InputObjectFieldConfig{ + Description: ltOperatorDescription, + Type: gql.Float, + }, + "_le": &gql.InputObjectFieldConfig{ + Description: leOperatorDescription, + Type: gql.Float, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.Float)), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.Float)), + }, + }, + }) +} // IntOperatorBlock filter block for Int types. -var IntOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "IntOperatorBlock", - Description: intOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.Int, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.Int, - }, - "_gt": &gql.InputObjectFieldConfig{ - Description: gtOperatorDescription, - Type: gql.Int, - }, - "_ge": &gql.InputObjectFieldConfig{ - Description: geOperatorDescription, - Type: gql.Int, - }, - "_lt": &gql.InputObjectFieldConfig{ - Description: ltOperatorDescription, - Type: gql.Int, - }, - "_le": &gql.InputObjectFieldConfig{ - Description: leOperatorDescription, - Type: gql.Int, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.Int), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.Int), - }, - }, -}) +func IntOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "IntOperatorBlock", + Description: intOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.Int, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + 
Type: gql.Int, + }, + "_gt": &gql.InputObjectFieldConfig{ + Description: gtOperatorDescription, + Type: gql.Int, + }, + "_ge": &gql.InputObjectFieldConfig{ + Description: geOperatorDescription, + Type: gql.Int, + }, + "_lt": &gql.InputObjectFieldConfig{ + Description: ltOperatorDescription, + Type: gql.Int, + }, + "_le": &gql.InputObjectFieldConfig{ + Description: leOperatorDescription, + Type: gql.Int, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.Int), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.Int), + }, + }, + }) +} // NotNullIntOperatorBlock filter block for Int! types. -var NotNullIntOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "NotNullIntOperatorBlock", - Description: notNullIntOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.Int, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.Int, - }, - "_gt": &gql.InputObjectFieldConfig{ - Description: gtOperatorDescription, - Type: gql.Int, - }, - "_ge": &gql.InputObjectFieldConfig{ - Description: geOperatorDescription, - Type: gql.Int, - }, - "_lt": &gql.InputObjectFieldConfig{ - Description: ltOperatorDescription, - Type: gql.Int, - }, - "_le": &gql.InputObjectFieldConfig{ - Description: leOperatorDescription, - Type: gql.Int, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.Int)), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.Int)), - }, - }, -}) +func NotNullIntOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "NotNullIntOperatorBlock", + Description: notNullIntOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": 
&gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.Int, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.Int, + }, + "_gt": &gql.InputObjectFieldConfig{ + Description: gtOperatorDescription, + Type: gql.Int, + }, + "_ge": &gql.InputObjectFieldConfig{ + Description: geOperatorDescription, + Type: gql.Int, + }, + "_lt": &gql.InputObjectFieldConfig{ + Description: ltOperatorDescription, + Type: gql.Int, + }, + "_le": &gql.InputObjectFieldConfig{ + Description: leOperatorDescription, + Type: gql.Int, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.Int)), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.Int)), + }, + }, + }) +} // StringOperatorBlock filter block for string types. -var StringOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "StringOperatorBlock", - Description: stringOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.String, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.String, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.String), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.String), - }, - "_like": &gql.InputObjectFieldConfig{ - Description: likeStringOperatorDescription, - Type: gql.String, - }, - "_nlike": &gql.InputObjectFieldConfig{ - Description: nlikeStringOperatorDescription, - Type: gql.String, - }, - "_ilike": &gql.InputObjectFieldConfig{ - Description: ilikeStringOperatorDescription, - Type: gql.String, - }, - "_nilike": &gql.InputObjectFieldConfig{ - Description: nilikeStringOperatorDescription, - Type: gql.String, - }, - }, -}) +func 
StringOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "StringOperatorBlock", + Description: stringOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.String, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.String, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.String), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.String), + }, + "_like": &gql.InputObjectFieldConfig{ + Description: likeStringOperatorDescription, + Type: gql.String, + }, + "_nlike": &gql.InputObjectFieldConfig{ + Description: nlikeStringOperatorDescription, + Type: gql.String, + }, + "_ilike": &gql.InputObjectFieldConfig{ + Description: ilikeStringOperatorDescription, + Type: gql.String, + }, + "_nilike": &gql.InputObjectFieldConfig{ + Description: nilikeStringOperatorDescription, + Type: gql.String, + }, + }, + }) +} // NotNullstringOperatorBlock filter block for string! types. 
-var NotNullstringOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "NotNullStringOperatorBlock", - Description: notNullStringOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.String, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.String, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.String)), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.String)), - }, - "_like": &gql.InputObjectFieldConfig{ - Description: likeStringOperatorDescription, - Type: gql.String, - }, - "_nlike": &gql.InputObjectFieldConfig{ - Description: nlikeStringOperatorDescription, - Type: gql.String, - }, - "_ilike": &gql.InputObjectFieldConfig{ - Description: ilikeStringOperatorDescription, - Type: gql.String, - }, - "_nilike": &gql.InputObjectFieldConfig{ - Description: nilikeStringOperatorDescription, - Type: gql.String, - }, - }, -}) +func NotNullstringOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "NotNullStringOperatorBlock", + Description: notNullStringOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.String, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.String, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.String)), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.String)), + }, + "_like": &gql.InputObjectFieldConfig{ + Description: likeStringOperatorDescription, + Type: gql.String, + }, + "_nlike": &gql.InputObjectFieldConfig{ + 
Description: nlikeStringOperatorDescription, + Type: gql.String, + }, + "_ilike": &gql.InputObjectFieldConfig{ + Description: ilikeStringOperatorDescription, + Type: gql.String, + }, + "_nilike": &gql.InputObjectFieldConfig{ + Description: nilikeStringOperatorDescription, + Type: gql.String, + }, + }, + }) +} // IdOperatorBlock filter block for ID types. -var IdOperatorBlock = gql.NewInputObject(gql.InputObjectConfig{ - Name: "IDOperatorBlock", - Description: idOperatorBlockDescription, - Fields: gql.InputObjectConfigFieldMap{ - "_eq": &gql.InputObjectFieldConfig{ - Description: eqOperatorDescription, - Type: gql.ID, - }, - "_ne": &gql.InputObjectFieldConfig{ - Description: neOperatorDescription, - Type: gql.ID, - }, - "_in": &gql.InputObjectFieldConfig{ - Description: inOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.ID)), - }, - "_nin": &gql.InputObjectFieldConfig{ - Description: ninOperatorDescription, - Type: gql.NewList(gql.NewNonNull(gql.ID)), - }, - }, -}) +func IdOperatorBlock() *gql.InputObject { + return gql.NewInputObject(gql.InputObjectConfig{ + Name: "IDOperatorBlock", + Description: idOperatorBlockDescription, + Fields: gql.InputObjectConfigFieldMap{ + "_eq": &gql.InputObjectFieldConfig{ + Description: eqOperatorDescription, + Type: gql.ID, + }, + "_ne": &gql.InputObjectFieldConfig{ + Description: neOperatorDescription, + Type: gql.ID, + }, + "_in": &gql.InputObjectFieldConfig{ + Description: inOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.ID)), + }, + "_nin": &gql.InputObjectFieldConfig{ + Description: ninOperatorDescription, + Type: gql.NewList(gql.NewNonNull(gql.ID)), + }, + }, + }) +} diff --git a/internal/request/graphql/schema/types/commits.go b/internal/request/graphql/schema/types/commits.go index 99373a367c..be05b5feb6 100644 --- a/internal/request/graphql/schema/types/commits.go +++ b/internal/request/graphql/schema/types/commits.go @@ -16,34 +16,23 @@ import ( "github.com/sourcenetwork/defradb/client/request" ) 
-var ( - // Helper only for `commit` below. - commitCountFieldArg = gql.NewEnum(gql.EnumConfig{ - Name: "commitCountFieldArg", - Description: CountFieldDescription, - Values: gql.EnumValueConfigMap{ - "links": &gql.EnumValueConfig{ - Description: commitLinksDescription, - Value: "links", - }, - }, - }) - - // Commit represents an individual commit to a MerkleCRDT - // type Commit { - // Height: Int - // CID: String - // DocID: String - // CollectionID: Int - // SchemaVersionID: String - // Delta: String - // Previous: [Commit] - // Links: [Commit] - // } - // - // Any self referential type needs to be initialized - // inside the init() func - CommitObject = gql.NewObject(gql.ObjectConfig{ +// Commit represents an individual commit to a MerkleCRDT +// +// type Commit { +// Height: Int +// CID: String +// DocID: String +// CollectionID: Int +// SchemaVersionID: String +// Delta: String +// Previous: [Commit] +// Links: [Commit] +// } +// +// Any self referential type needs to be initialized +// inside the init() func +func CommitObject(commitLinkObject *gql.Object) *gql.Object { + return gql.NewObject(gql.ObjectConfig{ Name: request.CommitTypeName, Description: commitDescription, Fields: gql.Fields{ @@ -81,23 +70,34 @@ var ( }, request.LinksFieldName: &gql.Field{ Description: commitLinksDescription, - Type: gql.NewList(CommitLinkObject), + Type: gql.NewList(commitLinkObject), }, request.CountFieldName: &gql.Field{ Description: CountFieldDescription, Type: gql.Int, Args: gql.FieldConfigArgument{ request.FieldName: &gql.ArgumentConfig{ - Type: commitCountFieldArg, + Type: gql.NewEnum(gql.EnumConfig{ + Name: "commitCountFieldArg", + Description: CountFieldDescription, + Values: gql.EnumValueConfigMap{ + "links": &gql.EnumValueConfig{ + Description: commitLinksDescription, + Value: "links", + }, + }, + }), }, }, }, }, }) +} - // CommitLink is a named DAG link between commits. 
- // This is primary used for CompositeDAG CRDTs - CommitLinkObject = gql.NewObject(gql.ObjectConfig{ +// CommitLink is a named DAG link between commits. +// This is primary used for CompositeDAG CRDTs +func CommitLinkObject() *gql.Object { + return gql.NewObject(gql.ObjectConfig{ Name: "CommitLink", Description: commitLinksDescription, Fields: gql.Fields{ @@ -111,78 +111,80 @@ var ( }, }, }) +} - CommitsOrderArg = gql.NewInputObject( +func CommitsOrderArg(orderEnum *gql.Enum) *gql.InputObject { + return gql.NewInputObject( gql.InputObjectConfig{ Name: "commitsOrderArg", Description: OrderArgDescription, Fields: gql.InputObjectConfigFieldMap{ "height": &gql.InputObjectFieldConfig{ Description: commitHeightFieldDescription, - Type: OrderingEnum, + Type: orderEnum, }, "cid": &gql.InputObjectFieldConfig{ Description: commitCIDFieldDescription, - Type: OrderingEnum, + Type: orderEnum, }, request.DocIDArgName: &gql.InputObjectFieldConfig{ Description: commitDocIDFieldDescription, - Type: OrderingEnum, + Type: orderEnum, }, "collectionID": &gql.InputObjectFieldConfig{ Description: commitCollectionIDFieldDescription, - Type: OrderingEnum, - }, - }, - }, - ) - - commitFields = gql.NewEnum( - gql.EnumConfig{ - Name: "commitFields", - Description: commitFieldsEnumDescription, - Values: gql.EnumValueConfigMap{ - "height": &gql.EnumValueConfig{ - Value: "height", - Description: commitHeightFieldDescription, - }, - "cid": &gql.EnumValueConfig{ - Value: "cid", - Description: commitCIDFieldDescription, - }, - request.DocIDArgName: &gql.EnumValueConfig{ - Value: request.DocIDArgName, - Description: commitDocIDFieldDescription, - }, - "collectionID": &gql.EnumValueConfig{ - Value: "collectionID", - Description: commitCollectionIDFieldDescription, - }, - "fieldName": &gql.EnumValueConfig{ - Value: "fieldName", - Description: commitFieldNameFieldDescription, - }, - "fieldId": &gql.EnumValueConfig{ - Value: "fieldId", - Description: commitFieldIDFieldDescription, + Type: orderEnum, }, 
}, }, ) +} - QueryCommits = &gql.Field{ +func QueryCommits(commitObject *gql.Object, commitsOrderArg *gql.InputObject) *gql.Field { + return &gql.Field{ Name: "commits", Description: commitsQueryDescription, - Type: gql.NewList(CommitObject), + Type: gql.NewList(commitObject), Args: gql.FieldConfigArgument{ request.DocIDArgName: NewArgConfig(gql.ID, commitDocIDArgDescription), request.FieldIDName: NewArgConfig(gql.String, commitFieldIDArgDescription), - "order": NewArgConfig(CommitsOrderArg, OrderArgDescription), + "order": NewArgConfig(commitsOrderArg, OrderArgDescription), "cid": NewArgConfig(gql.ID, commitCIDArgDescription), "groupBy": NewArgConfig( gql.NewList( gql.NewNonNull( - commitFields, + gql.NewEnum( + gql.EnumConfig{ + Name: "commitFields", + Description: commitFieldsEnumDescription, + Values: gql.EnumValueConfigMap{ + "height": &gql.EnumValueConfig{ + Value: "height", + Description: commitHeightFieldDescription, + }, + "cid": &gql.EnumValueConfig{ + Value: "cid", + Description: commitCIDFieldDescription, + }, + request.DocIDArgName: &gql.EnumValueConfig{ + Value: request.DocIDArgName, + Description: commitDocIDFieldDescription, + }, + "collectionID": &gql.EnumValueConfig{ + Value: "collectionID", + Description: commitCollectionIDFieldDescription, + }, + "fieldName": &gql.EnumValueConfig{ + Value: "fieldName", + Description: commitFieldNameFieldDescription, + }, + "fieldId": &gql.EnumValueConfig{ + Value: "fieldId", + Description: commitFieldIDFieldDescription, + }, + }, + }, + ), ), ), GroupByArgDescription, @@ -192,14 +194,16 @@ var ( request.DepthClause: NewArgConfig(gql.Int, commitDepthArgDescription), }, } +} - QueryLatestCommits = &gql.Field{ +func QueryLatestCommits(commitObject *gql.Object) *gql.Field { + return &gql.Field{ Name: "latestCommits", Description: latestCommitsQueryDescription, - Type: gql.NewList(CommitObject), + Type: gql.NewList(commitObject), Args: gql.FieldConfigArgument{ request.DocIDArgName: 
NewArgConfig(gql.NewNonNull(gql.ID), commitDocIDArgDescription), request.FieldIDName: NewArgConfig(gql.String, commitFieldIDArgDescription), }, } -) +} diff --git a/internal/request/graphql/schema/types/scalars.go b/internal/request/graphql/schema/types/scalars.go index 1d944a0f73..b86c744607 100644 --- a/internal/request/graphql/schema/types/scalars.go +++ b/internal/request/graphql/schema/types/scalars.go @@ -46,24 +46,26 @@ func coerceBlob(value any) any { } } -var BlobScalarType = graphql.NewScalar(graphql.ScalarConfig{ - Name: "Blob", - Description: "The `Blob` scalar type represents a binary large object.", - // Serialize converts the value to a hex string - Serialize: coerceBlob, - // ParseValue converts the value to a hex string - ParseValue: coerceBlob, - // ParseLiteral converts the ast value to a hex string - ParseLiteral: func(valueAST ast.Value) any { - switch valueAST := valueAST.(type) { - case *ast.StringValue: - return coerceBlob(valueAST.Value) - default: - // return nil if the value cannot be parsed - return nil - } - }, -}) +func BlobScalarType() *graphql.Scalar { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: "Blob", + Description: "The `Blob` scalar type represents a binary large object.", + // Serialize converts the value to a hex string + Serialize: coerceBlob, + // ParseValue converts the value to a hex string + ParseValue: coerceBlob, + // ParseLiteral converts the ast value to a hex string + ParseLiteral: func(valueAST ast.Value) any { + switch valueAST := valueAST.(type) { + case *ast.StringValue: + return coerceBlob(valueAST.Value) + default: + // return nil if the value cannot be parsed + return nil + } + }, + }) +} // coerceJSON converts the given value into a valid json string. // If the value cannot be converted nil is returned. 
@@ -98,21 +100,23 @@ func coerceJSON(value any) any { } } -var JSONScalarType = graphql.NewScalar(graphql.ScalarConfig{ - Name: "JSON", - Description: "The `JSON` scalar type represents a JSON string.", - // Serialize converts the value to a json string - Serialize: coerceJSON, - // ParseValue converts the value to a json string - ParseValue: coerceJSON, - // ParseLiteral converts the ast value to a json string - ParseLiteral: func(valueAST ast.Value) any { - switch valueAST := valueAST.(type) { - case *ast.StringValue: - return coerceJSON(valueAST.Value) - default: - // return nil if the value cannot be parsed - return nil - } - }, -}) +func JSONScalarType() *graphql.Scalar { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: "JSON", + Description: "The `JSON` scalar type represents a JSON string.", + // Serialize converts the value to a json string + Serialize: coerceJSON, + // ParseValue converts the value to a json string + ParseValue: coerceJSON, + // ParseLiteral converts the ast value to a json string + ParseLiteral: func(valueAST ast.Value) any { + switch valueAST := valueAST.(type) { + case *ast.StringValue: + return coerceJSON(valueAST.Value) + default: + // return nil if the value cannot be parsed + return nil + } + }, + }) +} diff --git a/internal/request/graphql/schema/types/scalars_test.go b/internal/request/graphql/schema/types/scalars_test.go index 6be3fa23fa..fba94ce67b 100644 --- a/internal/request/graphql/schema/types/scalars_test.go +++ b/internal/request/graphql/schema/types/scalars_test.go @@ -34,7 +34,7 @@ func TestBlobScalarTypeSerialize(t *testing.T) { {false, nil}, } for _, c := range cases { - result := BlobScalarType.Serialize(c.input) + result := BlobScalarType().Serialize(c.input) assert.Equal(t, c.expect, result) } } @@ -60,7 +60,7 @@ func TestBlobScalarTypeParseValue(t *testing.T) { {false, nil}, } for _, c := range cases { - result := BlobScalarType.ParseValue(c.input) + result := BlobScalarType().ParseValue(c.input) 
assert.Equal(t, c.expect, result) } } @@ -82,7 +82,7 @@ func TestBlobScalarTypeParseLiteral(t *testing.T) { {&ast.ObjectValue{}, nil}, } for _, c := range cases { - result := BlobScalarType.ParseLiteral(c.input) + result := BlobScalarType().ParseLiteral(c.input) assert.Equal(t, c.expect, result) } } @@ -141,10 +141,10 @@ func TestJSONScalarTypeParseAndSerialize(t *testing.T) { {false, nil}, } for _, c := range cases { - parsed := JSONScalarType.ParseValue(c.input) + parsed := JSONScalarType().ParseValue(c.input) assert.Equal(t, c.expect, parsed) - serialized := JSONScalarType.Serialize(c.input) + serialized := JSONScalarType().Serialize(c.input) assert.Equal(t, c.expect, serialized) } } @@ -165,7 +165,7 @@ func TestJSONScalarTypeParseLiteral(t *testing.T) { {&ast.ObjectValue{}, nil}, } for _, c := range cases { - result := JSONScalarType.ParseLiteral(c.input) + result := JSONScalarType().ParseLiteral(c.input) assert.Equal(t, c.expect, result) } } diff --git a/internal/request/graphql/schema/types/types.go b/internal/request/graphql/schema/types/types.go index 7865e204db..ae027312ba 100644 --- a/internal/request/graphql/schema/types/types.go +++ b/internal/request/graphql/schema/types/types.go @@ -44,9 +44,9 @@ const ( FieldOrderDESC = "DESC" ) -var ( - // OrderingEnum is an enum for the Ordering argument. - OrderingEnum = gql.NewEnum(gql.EnumConfig{ +// OrderingEnum is an enum for the Ordering argument. 
+func OrderingEnum() *gql.Enum { + return gql.NewEnum(gql.EnumConfig{ Name: "Ordering", Values: gql.EnumValueConfigMap{ "ASC": &gql.EnumValueConfig{ @@ -59,8 +59,10 @@ var ( }, }, }) +} - ExplainEnum = gql.NewEnum(gql.EnumConfig{ +func ExplainEnum() *gql.Enum { + return gql.NewEnum(gql.EnumConfig{ Name: "ExplainType", Description: "ExplainType is an enum selecting the type of explanation done by the @explain directive.", Values: gql.EnumValueConfigMap{ @@ -80,13 +82,15 @@ var ( }, }, }) +} - ExplainDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ +func ExplainDirective(explainEnum *gql.Enum) *gql.Directive { + return gql.NewDirective(gql.DirectiveConfig{ Name: ExplainLabel, Description: "@explain is a directive that can be used to explain the query.", Args: gql.FieldConfigArgument{ ExplainArgNameType: &gql.ArgumentConfig{ - Type: ExplainEnum, + Type: explainEnum, }, }, @@ -97,8 +101,10 @@ var ( gql.DirectiveLocationMutation, }, }) +} - PolicyDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ +func PolicyDirective() *gql.Directive { + return gql.NewDirective(gql.DirectiveConfig{ Name: PolicySchemaDirectiveLabel, Description: "@policy is a directive that can be used to link a policy on a collection type.", Args: gql.FieldConfigArgument{ @@ -113,8 +119,10 @@ var ( gql.DirectiveLocationObject, }, }) +} - IndexDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ +func IndexDirective(orderingEnum *gql.Enum) *gql.Directive { + return gql.NewDirective(gql.DirectiveConfig{ Name: IndexDirectiveLabel, Description: "@index is a directive that can be used to create an index on a type.", Args: gql.FieldConfigArgument{ @@ -125,15 +133,17 @@ var ( Type: gql.NewList(gql.String), }, IndexDirectivePropDirections: &gql.ArgumentConfig{ - Type: gql.NewList(OrderingEnum), + Type: gql.NewList(orderingEnum), }, }, Locations: []string{ gql.DirectiveLocationObject, }, }) +} - IndexFieldDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ 
+func IndexFieldDirective(orderingEnum *gql.Enum) *gql.Directive { + return gql.NewDirective(gql.DirectiveConfig{ Name: IndexDirectiveLabel, Description: "@index is a directive that can be used to create an index on a field.", Args: gql.FieldConfigArgument{ @@ -144,15 +154,17 @@ var ( Type: gql.Boolean, }, IndexDirectivePropDirection: &gql.ArgumentConfig{ - Type: OrderingEnum, + Type: orderingEnum, }, }, Locations: []string{ gql.DirectiveLocationField, }, }) +} - CRDTEnum = gql.NewEnum(gql.EnumConfig{ +func CRDTEnum() *gql.Enum { + return gql.NewEnum(gql.EnumConfig{ Name: "CRDTType", Description: "One of the possible CRDT Types.", Values: gql.EnumValueConfigMap{ @@ -163,51 +175,57 @@ var ( client.PN_COUNTER.String(): &gql.EnumValueConfig{ Value: client.PN_COUNTER, Description: `Positive-Negative Counter. - -WARNING: Incrementing an integer and causing it to overflow the int64 max value -will cause the value to roll over to the int64 min value. Incremeting a float and -causing it to overflow the float64 max value will act like a no-op.`, + + WARNING: Incrementing an integer and causing it to overflow the int64 max value + will cause the value to roll over to the int64 min value. Incremeting a float and + causing it to overflow the float64 max value will act like a no-op.`, }, client.P_COUNTER.String(): &gql.EnumValueConfig{ Value: client.P_COUNTER, Description: `Positive Counter. - -WARNING: Incrementing an integer and causing it to overflow the int64 max value -will cause the value to roll over to the int64 min value. Incremeting a float and -causing it to overflow the float64 max value will act like a no-op.`, + + WARNING: Incrementing an integer and causing it to overflow the int64 max value + will cause the value to roll over to the int64 min value. 
Incremeting a float and + causing it to overflow the float64 max value will act like a no-op.`, }, }, }) +} - // CRDTFieldDirective @crdt is used to define the CRDT type of a field - CRDTFieldDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ +// CRDTFieldDirective @crdt is used to define the CRDT type of a field +func CRDTFieldDirective(crdtEnum *gql.Enum) *gql.Directive { + return gql.NewDirective(gql.DirectiveConfig{ Name: CRDTDirectiveLabel, Description: crdtDirectiveDescription, Args: gql.FieldConfigArgument{ CRDTDirectivePropType: &gql.ArgumentConfig{ - Type: CRDTEnum, + Type: crdtEnum, }, }, Locations: []string{ gql.DirectiveLocationField, }, }) +} - // PrimaryDirective @primary is used to indicate the primary - // side of a one-to-one relationship. - PrimaryDirective = gql.NewDirective(gql.DirectiveConfig{ +// PrimaryDirective @primary is used to indicate the primary +// side of a one-to-one relationship. +func PrimaryDirective() *gql.Directive { + return gql.NewDirective(gql.DirectiveConfig{ Name: PrimaryLabel, Description: primaryDirectiveDescription, Locations: []string{ gql.DirectiveLocationFieldDefinition, }, }) +} - // RelationDirective @relation is used to explicitly define - // the attributes of a relationship, specifically, the name - // if you don't want to use the default generated relationship - // name. - RelationDirective = gql.NewDirective(gql.DirectiveConfig{ +// RelationDirective @relation is used to explicitly define +// the attributes of a relationship, specifically, the name +// if you don't want to use the default generated relationship +// name. 
+func RelationDirective() *gql.Directive { + return gql.NewDirective(gql.DirectiveConfig{ Name: RelationLabel, Description: relationDirectiveDescription, Args: gql.FieldConfigArgument{ @@ -220,7 +238,7 @@ causing it to overflow the float64 max value will act like a no-op.`, gql.DirectiveLocationFieldDefinition, }, }) -) +} func NewArgConfig(t gql.Type, description string) *gql.ArgumentConfig { return &gql.ArgumentConfig{ diff --git a/tests/integration/schema/client_test.go b/tests/integration/schema/client_test.go index ef2a155297..06aed989fd 100644 --- a/tests/integration/schema/client_test.go +++ b/tests/integration/schema/client_test.go @@ -38,7 +38,7 @@ func TestIntrospectionExplainTypeDefined(t *testing.T) { "__schema": map[string]any{ "types": []any{ map[string]any{ - "description": schemaTypes.ExplainEnum.Description(), + "description": schemaTypes.ExplainEnum().Description(), "kind": "ENUM", "name": "ExplainType", }, From 2efd29a63635444d0ecd9a81d119e9f4efe8714f Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Thu, 27 Jun 2024 11:07:28 -0700 Subject: [PATCH 68/78] fix: Keyring output (#2784) ## Relevant issue(s) Resolves #2759 Resolves #2758 Resolves #2757 ## Description This PR adds more logs to the keyring to improve the user experience. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
- unit test Specify the platform(s) on which this was tested: - MacOS --- cli/keyring.go | 16 +++++++++- cli/keyring_generate.go | 32 +++++++++++++------ cli/keyring_generate_test.go | 16 ++++++++++ cli/utils.go | 6 +++- .../website/references/cli/defradb_keyring.md | 14 ++++++++ .../cli/defradb_keyring_generate.md | 7 +++- 6 files changed, 79 insertions(+), 12 deletions(-) diff --git a/cli/keyring.go b/cli/keyring.go index c60440ccc9..a905ce190b 100644 --- a/cli/keyring.go +++ b/cli/keyring.go @@ -19,7 +19,21 @@ func MakeKeyringCommand() *cobra.Command { Use: "keyring", Short: "Manage DefraDB private keys", Long: `Manage DefraDB private keys. -Generate, import, and export private keys.`, +Generate, import, and export private keys. + +The following keys are loaded from the keyring on start: + peer-key: Ed25519 private key (required) + encryption-key: AES-128, AES-192, or AES-256 key (optional) + +To randomly generate the required keys, run the following command: + defradb keyring generate + +To import externally generated keys, run the following command: + defradb keyring import + +To learn more about the available options: + defradb keyring --help +`, } return cmd } diff --git a/cli/keyring_generate.go b/cli/keyring_generate.go index f9e073d0d5..34209671a5 100644 --- a/cli/keyring_generate.go +++ b/cli/keyring_generate.go @@ -17,12 +17,14 @@ import ( ) func MakeKeyringGenerateCommand() *cobra.Command { - var noEncryption bool + var noEncryptionKey bool + var noPeerKey bool var cmd = &cobra.Command{ Use: "generate", Short: "Generate private keys", Long: `Generate private keys. Randomly generate and store private keys in the keyring. +By default peer and encryption keys will be generated. WARNING: This will overwrite existing keys in the keyring. 
@@ -32,6 +34,9 @@ Example: Example: with no encryption key defradb keyring generate --no-encryption-key +Example: with no peer key + defradb keyring generate --no-peer-key + Example: with system keyring defradb keyring generate --keyring-backend system`, RunE: func(cmd *cobra.Command, args []string) error { @@ -39,8 +44,7 @@ Example: with system keyring if err != nil { return err } - if !noEncryption { - // generate optional encryption key + if !noEncryptionKey { encryptionKey, err := crypto.GenerateAES256() if err != nil { return err @@ -49,15 +53,25 @@ Example: with system keyring if err != nil { return err } + log.Info("generated encryption key") } - peerKey, err := crypto.GenerateEd25519() - if err != nil { - return err + if !noPeerKey { + peerKey, err := crypto.GenerateEd25519() + if err != nil { + return err + } + err = keyring.Set(peerKeyName, peerKey) + if err != nil { + return err + } + log.Info("generated peer key") } - return keyring.Set(peerKeyName, peerKey) + return nil }, } - cmd.Flags().BoolVar(&noEncryption, "no-encryption-key", false, - "Skip generating an encryption. Encryption at rest will be disabled") + cmd.Flags().BoolVar(&noEncryptionKey, "no-encryption-key", false, + "Skip generating an encryption key. 
Encryption at rest will be disabled") + cmd.Flags().BoolVar(&noPeerKey, "no-peer-key", false, + "Skip generating a peer key.") return cmd } diff --git a/cli/keyring_generate_test.go b/cli/keyring_generate_test.go index 672760c2ef..b29446bd15 100644 --- a/cli/keyring_generate_test.go +++ b/cli/keyring_generate_test.go @@ -50,3 +50,19 @@ func TestKeyringGenerateNoEncryptionKey(t *testing.T) { assert.NoFileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName)) assert.FileExists(t, filepath.Join(rootdir, "keys", peerKeyName)) } + +func TestKeyringGenerateNoPeerKey(t *testing.T) { + rootdir := t.TempDir() + readPassword = func(_ *cobra.Command, _ string) ([]byte, error) { + return []byte("secret"), nil + } + + cmd := NewDefraCommand() + cmd.SetArgs([]string{"keyring", "generate", "--no-peer-key", "--rootdir", rootdir}) + + err := cmd.Execute() + require.NoError(t, err) + + assert.FileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName)) + assert.NoFileExists(t, filepath.Join(rootdir, "keys", peerKeyName)) +} diff --git a/cli/utils.go b/cli/utils.go index ed98fb0f30..d1ee09962b 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -181,9 +181,13 @@ func setContextRootDir(cmd *cobra.Command) error { // openKeyring opens the keyring for the current environment. 
func openKeyring(cmd *cobra.Command) (keyring.Keyring, error) { cfg := mustGetContextConfig(cmd) - if cfg.Get("keyring.backend") == "system" { + backend := cfg.Get("keyring.backend") + if backend == "system" { return keyring.OpenSystemKeyring(cfg.GetString("keyring.namespace")), nil } + if backend != "file" { + log.Info("keyring defaulted to file backend") + } path := cfg.GetString("keyring.path") if err := os.MkdirAll(path, 0755); err != nil { return nil, err diff --git a/docs/website/references/cli/defradb_keyring.md b/docs/website/references/cli/defradb_keyring.md index e650592529..ef091ef233 100644 --- a/docs/website/references/cli/defradb_keyring.md +++ b/docs/website/references/cli/defradb_keyring.md @@ -7,6 +7,20 @@ Manage DefraDB private keys Manage DefraDB private keys. Generate, import, and export private keys. +The following keys are loaded from the keyring on start: + peer-key: Ed25519 private key (required) + encryption-key: AES-128, AES-192, or AES-256 key (optional) + +To randomly generate the required keys, run the following command: + defradb keyring generate + +To import externally generated keys, run the following command: + defradb keyring import + +To learn more about the available options: + defradb keyring --help + + ### Options ``` diff --git a/docs/website/references/cli/defradb_keyring_generate.md b/docs/website/references/cli/defradb_keyring_generate.md index ab8ce5ad88..5f58c61369 100644 --- a/docs/website/references/cli/defradb_keyring_generate.md +++ b/docs/website/references/cli/defradb_keyring_generate.md @@ -6,6 +6,7 @@ Generate private keys Generate private keys. Randomly generate and store private keys in the keyring. +By default peer and encryption keys will be generated. WARNING: This will overwrite existing keys in the keyring. 
@@ -15,6 +16,9 @@ Example: Example: with no encryption key defradb keyring generate --no-encryption-key +Example: with no peer key + defradb keyring generate --no-peer-key + Example: with system keyring defradb keyring generate --keyring-backend system @@ -26,7 +30,8 @@ defradb keyring generate [flags] ``` -h, --help help for generate - --no-encryption-key Skip generating an encryption. Encryption at rest will be disabled + --no-encryption-key Skip generating an encryption key. Encryption at rest will be disabled + --no-peer-key Skip generating a peer key. ``` ### Options inherited from parent commands From dd53c1372ed4a0902e5e4a5a622a46f46ebecbc6 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Thu, 27 Jun 2024 16:02:00 -0400 Subject: [PATCH 69/78] test: Support asserting on doc index in test results (#2786) ## Relevant issue(s) Resolves #2785 ## Description Support asserting on doc index in test results. PR does not seek to propagate it's usage, only to permit it's usage. --- .../net/one_to_many/peer/with_create_update_test.go | 4 ++-- tests/integration/utils2.go | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/integration/net/one_to_many/peer/with_create_update_test.go b/tests/integration/net/one_to_many/peer/with_create_update_test.go index e6862754de..1cedd82495 100644 --- a/tests/integration/net/one_to_many/peer/with_create_update_test.go +++ b/tests/integration/net/one_to_many/peer/with_create_update_test.go @@ -81,7 +81,7 @@ func TestP2POneToManyPeerWithCreateUpdateLinkingSyncedDocToUnsyncedDoc(t *testin Results: []map[string]any{ { "Name": "Gulistan", - "Author_id": "bae-6a4c24c0-7b0b-5f51-a274-132d7ca90499", + "Author_id": testUtils.NewDocIndex(0, 0), "Author": map[string]any{ "Name": "Saadi", }, @@ -102,7 +102,7 @@ func TestP2POneToManyPeerWithCreateUpdateLinkingSyncedDocToUnsyncedDoc(t *testin Results: []map[string]any{ { "Name": "Gulistan", - "Author_id": "bae-6a4c24c0-7b0b-5f51-a274-132d7ca90499", + "Author_id": 
testUtils.NewDocIndex(0, 0), // "Saadi" was not synced to node 1, the update did not // result in an error and synced to relational id even though "Saadi" // does not exist in this node. diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 5e9d089ccd..a1071de4aa 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -1880,6 +1880,16 @@ func assertRequestResults( valueSet := anyOfByField[dfk] valueSet = append(valueSet, actualValue) anyOfByField[dfk] = valueSet + case DocIndex: + expectedDocID := s.documents[r.CollectionIndex][r.Index].ID().String() + assertResultsEqual( + s.t, + s.clientType, + expectedDocID, + actualValue, + fmt.Sprintf("node: %v, doc: %v", nodeID, docIndex), + ) + default: assertResultsEqual( s.t, From 1a8b1fa8bf9d636d7b59198ca7470c2a89bf5b3a Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Thu, 27 Jun 2024 16:27:55 -0400 Subject: [PATCH 70/78] test: Remove duplicate test (#2787) ## Relevant issue(s) Resolves #1854 ## Description Removes a duplicate test. I think this bug might have been resolved by the validation refactor a few weeks ago and I never spotted the test-split when tweaking the expected result. 
--- .../one_to_many/with_alias_test.go | 60 ------------------- 1 file changed, 60 deletions(-) diff --git a/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go b/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go index bc0af76015..5f50e1732b 100644 --- a/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go +++ b/tests/integration/mutation/update/field_kinds/one_to_many/with_alias_test.go @@ -25,66 +25,6 @@ func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_Collectio test := testUtils.TestCase{ Description: "One to many update mutation using relation alias name from single side (wrong)", - // This restiction is temporary due to an inconsitent error message, see - // TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_GQL - // and https://github.com/sourcenetwork/defradb/issues/1854 for more info. - SupportedMutationTypes: immutable.Some([]testUtils.MutationType{ - testUtils.CollectionNamedMutationType, - testUtils.CollectionSaveMutationType, - }), - Actions: []any{ - testUtils.CreateDoc{ - CollectionID: 1, - Doc: `{ - "name": "John Grisham" - }`, - }, - testUtils.CreateDoc{ - CollectionID: 1, - Doc: `{ - "name": "New Shahzad" - }`, - }, - testUtils.CreateDoc{ - CollectionID: 0, - Doc: fmt.Sprintf( - `{ - "name": "Painted House", - "author": "%s" - }`, - author1ID, - ), - }, - testUtils.UpdateDoc{ - CollectionID: 1, - DocID: 1, - // NOTE: There is no `published` on book. - Doc: fmt.Sprintf( - `{ - "published": "%s" - }`, - bookID, - ), - ExpectedError: "The given field does not exist. 
Name: published", - }, - }, - } - - executeTestCase(t, test) -} - -func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_GQL(t *testing.T) { - author1ID := "bae-a47f80ab-1c30-53b3-9dac-04a4a3fda77e" - bookID := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" - - test := testUtils.TestCase{ - Description: "One to many update mutation using relation alias name from single side (wrong)", - // This restiction is temporary due to an inconsitent error message, see - // TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_Collection - // and https://github.com/sourcenetwork/defradb/issues/1854 for more info. - SupportedMutationTypes: immutable.Some([]testUtils.MutationType{ - testUtils.GQLRequestMutationType, - }), Actions: []any{ testUtils.CreateDoc{ CollectionID: 1, From 343ec387a4b9a02f45149b70298855ee13db78c6 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Thu, 27 Jun 2024 18:08:37 -0400 Subject: [PATCH 71/78] ci(i): Bump all gh-action versions to support node20 (#2780) ## Relevant issue(s) Resolves #2778 ## Description - Similar to #990 - Update all actions to `node20` versions - There was a bug the way we were using artifacts previously such that it would overwrite some generated code coverage reports due to artifact name clashing, this was caught as the behavior of "artifact name clashing" now causes error instead of "merging" them. The fix is in the last commit, which will now ensure "more" accurate coverage as the missing 3 reports are also included. ### Go Releaser - This should be done outside this PR, made an issue here: https://github.com/sourcenetwork/defradb/issues/2781 ## How has this been tested? 
- `act` tool - ci Specify the platform(s) on which this was tested: - WSL2 --- .github/workflows/build-dependencies.yml | 7 +++-- .github/workflows/build-then-deploy-ami.yml | 6 ++-- .github/workflows/check-documentation.yml | 21 ++++++++------ .github/workflows/check-mocks.yml | 7 +++-- .github/workflows/check-tidy.yml | 7 +++-- .github/workflows/check-vulnerabilities.yml | 2 +- .github/workflows/combine-bot-prs.yml | 2 +- .github/workflows/lint-then-benchmark.yml | 15 +++++----- .github/workflows/lint.yml | 17 +++++++---- .../preview-ami-with-terraform-plan.yml | 8 ++--- .github/workflows/release.yml | 26 ++++++++--------- .github/workflows/start-binary.yml | 7 +++-- .../workflows/test-and-upload-coverage.yml | 29 ++++++++++++++----- .github/workflows/validate-containerfile.yml | 8 ++--- .github/workflows/validate-title.yml | 2 +- 15 files changed, 96 insertions(+), 68 deletions(-) diff --git a/.github/workflows/build-dependencies.yml b/.github/workflows/build-dependencies.yml index 0bed67de09..aa5910a039 100644 --- a/.github/workflows/build-dependencies.yml +++ b/.github/workflows/build-dependencies.yml @@ -32,13 +32,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Build all dependencies run: make deps diff --git a/.github/workflows/build-then-deploy-ami.yml b/.github/workflows/build-then-deploy-ami.yml index 4423d70659..3051380e1f 100644 --- a/.github/workflows/build-then-deploy-ami.yml +++ b/.github/workflows/build-then-deploy-ami.yml @@ -48,7 +48,7 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Environment version target run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> ${GITHUB_ENV} @@ -89,10 +89,10 @@ jobs: steps: - name: 
Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Terraform action setup - uses: hashicorp/setup-terraform@v2 + uses: hashicorp/setup-terraform@v3 with: terraform_version: 1.3.7 diff --git a/.github/workflows/check-documentation.yml b/.github/workflows/check-documentation.yml index 97214d515b..c4cf4ca792 100644 --- a/.github/workflows/check-documentation.yml +++ b/.github/workflows/check-documentation.yml @@ -33,13 +33,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Try generating cli documentation run: make docs:cli @@ -58,13 +59,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Try generating http documentation run: make docs:http @@ -83,13 +85,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Try generating readme toc run: make toc diff --git a/.github/workflows/check-mocks.yml b/.github/workflows/check-mocks.yml index 80d9dbee20..5866ea699b 100644 --- a/.github/workflows/check-mocks.yml +++ b/.github/workflows/check-mocks.yml @@ -33,13 +33,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 
with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Try generating mocks run: make mocks diff --git a/.github/workflows/check-tidy.yml b/.github/workflows/check-tidy.yml index 979052cb5b..bbcf2d620b 100644 --- a/.github/workflows/check-tidy.yml +++ b/.github/workflows/check-tidy.yml @@ -37,13 +37,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false # This checks mod tidy is not broken. - name: Check mod tidy diff --git a/.github/workflows/check-vulnerabilities.yml b/.github/workflows/check-vulnerabilities.yml index 67d806ab6e..6f1b2fd35f 100644 --- a/.github/workflows/check-vulnerabilities.yml +++ b/.github/workflows/check-vulnerabilities.yml @@ -36,4 +36,4 @@ jobs: go-version-input: "1.21" go-package: ./... check-latest: true - cache: true + cache: false diff --git a/.github/workflows/combine-bot-prs.yml b/.github/workflows/combine-bot-prs.yml index abe21143d8..bb861fbecd 100644 --- a/.github/workflows/combine-bot-prs.yml +++ b/.github/workflows/combine-bot-prs.yml @@ -49,7 +49,7 @@ jobs: - name: Create combined pr id: create-combined-pr - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/lint-then-benchmark.yml b/.github/workflows/lint-then-benchmark.yml index c984cce3ef..1b1b1a73ba 100644 --- a/.github/workflows/lint-then-benchmark.yml +++ b/.github/workflows/lint-then-benchmark.yml @@ -51,17 +51,18 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Setting up Go explicitly is required for v3.0.0+ of golangci/golangci-lint-action. 
- name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Run the golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v6 with: # Required: the version of golangci-lint is required. @@ -192,7 +193,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run the full bechmarking suite if: needs.decide-benchmark-type.outputs.benchmark-type == 'FULL' @@ -214,7 +215,7 @@ jobs: if: | github.event_name == 'push' && github.ref_name == 'develop' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: bench-artifact-${{ github.sha }} path: bench-artifact-${{ github.sha }}.txt @@ -245,7 +246,7 @@ jobs: if: | github.event_name == 'pull_request' && github.base_ref == 'develop' - uses: dawidd6/action-download-artifact@v2 + uses: dawidd6/action-download-artifact@v6 with: github_token: ${{ secrets.ONLY_DEFRADB_REPO_CI_PAT }} workflow: lint-then-benchmark.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b482903cad..20f9128f59 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,9 +19,12 @@ on: push: permissions: - # Allow read access to pull request (Required for the `only-new-issues` option.) + # Required for the `only-new-issues` option. pull-requests: read + # Required for analysis. contents: read + # Required to annotate code in the PR. + checks: write jobs: lint-go: @@ -31,16 +34,18 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 + # Setting up Go explicitly is required for v3.0.0+ of golangci/golangci-lint-action. 
- name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Run golangci-lint linter - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v6 with: # Required: the version of golangci-lint is required. # Note: The version should not pick the patch version as the latest patch @@ -75,7 +80,7 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run yamllint linter uses: ibiqlik/action-yamllint@v3 diff --git a/.github/workflows/preview-ami-with-terraform-plan.yml b/.github/workflows/preview-ami-with-terraform-plan.yml index 25e975a247..40f4b1c948 100644 --- a/.github/workflows/preview-ami-with-terraform-plan.yml +++ b/.github/workflows/preview-ami-with-terraform-plan.yml @@ -49,7 +49,7 @@ jobs: - name: Stop and notify the use of unprivileged flow or missing tokens if: env.AWS_ACCESS_KEY_ID == '' || env.AWS_SECRET_ACCESS_KEY == '' # Note: Fail this step, as we don't want unprivileged access doing these changes. 
- uses: actions/github-script@v6 + uses: actions/github-script@v7 with: script: | let unprivileged_warning = @@ -64,10 +64,10 @@ jobs: core.setFailed(unprivileged_warning) - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Terraform action setup - uses: hashicorp/setup-terraform@v2 + uses: hashicorp/setup-terraform@v3 with: terraform_version: 1.3.7 @@ -93,7 +93,7 @@ jobs: continue-on-error: true - name: Comment results on pull request - uses: actions/github-script@v6 + uses: actions/github-script@v7 env: TERRAFORM_PLAN_OUTPUT: "Terraform Plan Output:\n${{ steps.terraform-plan.outputs.stdout }}\n" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bf8332107a..3351bfc242 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,16 +31,16 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true - cache: true + cache: false - name: Apply tag run: git tag ${{ github.event.inputs.tag }} @@ -50,18 +50,18 @@ jobs: - name: Set up QEMU if: matrix.os == 'ubuntu-latest' - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Log in to Docker Hub if: matrix.os == 'ubuntu-latest' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Log in to the Container registry if: matrix.os == 'ubuntu-latest' - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} @@ -109,7 +109,7 @@ jobs: needs: prepare steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -117,14 +117,14 @@ jobs: 
run: git tag ${{ github.event.inputs.tag }} - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true - cache: true + cache: false - name: Log in to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} @@ -193,7 +193,7 @@ jobs: steps: - name: Log in to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} diff --git a/.github/workflows/start-binary.yml b/.github/workflows/start-binary.yml index 7268faae7b..f77962f0e6 100644 --- a/.github/workflows/start-binary.yml +++ b/.github/workflows/start-binary.yml @@ -32,13 +32,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Build modules run: make deps:modules diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index 64d05cfb87..3931005a6c 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -102,13 +102,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go environment explicitly - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version-file: 'go.mod' check-latest: true + cache: false - name: Set cache paths id: cache-paths @@ -164,9 +165,19 @@ jobs: - name: Upload coverage artifact if: ${{ !matrix.detect-changes }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: ${{ matrix.os 
}}_${{ matrix.client-type }}_${{ matrix.database-type }}_${{ matrix.mutation-type }} + # Make sure the name is always unique per job as artifacts are now immutable. + # Note Issue: https://github.com/actions/upload-artifact/issues/478 + # Solve: https://github.com/actions/upload-artifact/issues/478#issuecomment-1885470013 + name: "coverage\ + _${{ matrix.os }}\ + _${{ matrix.client-type }}\ + _${{ matrix.database-type }}\ + _${{ matrix.mutation-type }}\ + _${{ matrix.lens-type }}\ + _${{ matrix.database-encryption }}\ + " path: coverage.txt if-no-files-found: error retention-days: 7 @@ -185,11 +196,14 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download coverage reports - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: + pattern: coverage_* + # Note: https://github.com/actions/download-artifact/blob/main/docs/MIGRATION.md + merge-multiple: false path: coverage_reports - name: Upload coverage to Codecov @@ -197,6 +211,7 @@ jobs: with: token: ${{ secrets.CODECOV_TOKEN }} name: defradb-codecov + files: coverage_reports/**/*.txt flags: all-tests os: 'linux' fail_ci_if_error: true diff --git a/.github/workflows/validate-containerfile.yml b/.github/workflows/validate-containerfile.yml index 260e0dba89..b0dc0d56c9 100644 --- a/.github/workflows/validate-containerfile.yml +++ b/.github/workflows/validate-containerfile.yml @@ -36,16 +36,16 @@ jobs: steps: - name: Check out the repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: context: . 
file: tools/defradb.containerfile diff --git a/.github/workflows/validate-title.yml b/.github/workflows/validate-title.yml index 9899a9b92a..fa054663ef 100644 --- a/.github/workflows/validate-title.yml +++ b/.github/workflows/validate-title.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout code into the directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Ensure the scripts are not broken run: make test:scripts From 8437805714c5858ff57d9f23e6d09f9dfa760939 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Thu, 27 Jun 2024 20:48:51 -0400 Subject: [PATCH 72/78] ci(i): Make change detector an independent workflow (#2789) ## Relevant issue(s) Resolves #2736 ## Description - Move change detection to a separate workflow to not block code coverage upload to be dependent / blocked by it. - More reasons in the issue (#2736) on why we want this. ### Note will need to change required checks naming, will do it pre-merge and after an approval. ## How has this been tested? - Manually Specify the platform(s) on which this was tested: - WSL2 --- .../workflows/check-data-format-changes.yml | 50 +++++++++++++++++++ .../workflows/test-and-upload-coverage.yml | 13 ----- 2 files changed, 50 insertions(+), 13 deletions(-) create mode 100644 .github/workflows/check-data-format-changes.yml diff --git a/.github/workflows/check-data-format-changes.yml b/.github/workflows/check-data-format-changes.yml new file mode 100644 index 0000000000..b9b2406c89 --- /dev/null +++ b/.github/workflows/check-data-format-changes.yml @@ -0,0 +1,50 @@ +# Copyright 2024 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. 
+ +name: Check Data Format Changes Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + check-data-format-changes: + name: Check data format changes job + + runs-on: ubuntu-latest + + steps: + + - name: Checkout code into the directory + uses: actions/checkout@v4 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v5 + with: + go-version-file: 'go.mod' + check-latest: true + cache: false + + - name: Build dependencies + run: | + make deps:modules + make deps:test + + - name: Run data format change detection tests + run: make test:changes diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index 3931005a6c..0e07157f38 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -35,7 +35,6 @@ jobs: database-type: [badger-file, badger-memory] mutation-type: [gql, collection-named, collection-save] lens-type: [wasm-time] - detect-changes: [false] database-encryption: [false] include: - os: ubuntu-latest @@ -43,21 +42,18 @@ jobs: database-type: badger-memory mutation-type: collection-save lens-type: wasm-time - detect-changes: true database-encryption: false - os: ubuntu-latest client-type: go database-type: badger-memory mutation-type: collection-save lens-type: wasm-time - detect-changes: false database-encryption: true - os: macos-latest client-type: go database-type: badger-memory mutation-type: collection-save lens-type: wasm-time - detect-changes: false database-encryption: false ## TODO: https://github.com/sourcenetwork/defradb/issues/2080 ## Uncomment the lines below to Re-enable the windows build once this todo is resolved. 
@@ -65,21 +61,18 @@ jobs: ## client-type: go ## database-type: badger-memory ## mutation-type: collection-save -## detect-changes: false ## database-encryption: false - os: ubuntu-latest client-type: go database-type: badger-memory mutation-type: collection-save lens-type: wazero - detect-changes: false database-encryption: false - os: ubuntu-latest client-type: go database-type: badger-memory mutation-type: collection-save lens-type: wasmer - detect-changes: false database-encryption: false runs-on: ${{ matrix.os }} @@ -156,15 +149,9 @@ jobs: make deps:test - name: Run integration tests - if: ${{ !matrix.detect-changes }} run: make test:coverage - - name: Run change detector tests - if: ${{ matrix.detect-changes }} - run: make test:changes - - name: Upload coverage artifact - if: ${{ !matrix.detect-changes }} uses: actions/upload-artifact@v4 with: # Make sure the name is always unique per job as artifacts are now immutable. From 7261711d96e3a0931d1bde80ed2161ca486d3b94 Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Fri, 28 Jun 2024 10:38:10 -0400 Subject: [PATCH 73/78] ci(i): Remove redundant matrix rule that is repeated (#2790) ## Description - Remove redundant matrix rule that is repeated (1st commit) - Move all non-ubuntu special rules at the bottom ## How has this been tested? 
- `act` tool - ci Specify the platform(s) on which this was tested: - WSL2 --- .../workflows/test-and-upload-coverage.yml | 23 ++++++++----------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml index 0e07157f38..392cef826b 100644 --- a/.github/workflows/test-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -42,13 +42,19 @@ jobs: database-type: badger-memory mutation-type: collection-save lens-type: wasm-time + database-encryption: true + - os: ubuntu-latest + client-type: go + database-type: badger-memory + mutation-type: collection-save + lens-type: wazero database-encryption: false - os: ubuntu-latest client-type: go database-type: badger-memory mutation-type: collection-save - lens-type: wasm-time - database-encryption: true + lens-type: wasmer + database-encryption: false - os: macos-latest client-type: go database-type: badger-memory @@ -61,19 +67,8 @@ jobs: ## client-type: go ## database-type: badger-memory ## mutation-type: collection-save +## lens-type: wasm-time ## database-encryption: false - - os: ubuntu-latest - client-type: go - database-type: badger-memory - mutation-type: collection-save - lens-type: wazero - database-encryption: false - - os: ubuntu-latest - client-type: go - database-type: badger-memory - mutation-type: collection-save - lens-type: wasmer - database-encryption: false runs-on: ${{ matrix.os }} From 666e871772bd114fb70e8b0a03ffdade1c338bc8 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Fri, 28 Jun 2024 11:08:38 -0400 Subject: [PATCH 74/78] docs: Remove reference to client ping from readme (#2793) ## Relevant issue(s) Resolves #2792 ## Description Removes the reference to client ping from readme. The ping command no longer exists. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index be76a151ff..6e1f2b3d1a 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ defradb keyring --help Start a node by executing `defradb start`. Keep the node running while going through the following examples. -Verify the local connection to the node works by executing `defradb client ping` in another terminal. +Verify the local connection to the node works by executing `defradb client collection describe` in another terminal. ## Configuration From 30b40fdcaf218619c299822181e47e10ab45a726 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Fri, 28 Jun 2024 12:42:00 -0400 Subject: [PATCH 75/78] test: Allow assertion of AddSchema results (#2788) ## Relevant issue(s) Resolves #2766 ## Description Allows assertion of AddSchema results. --- tests/integration/results.go | 42 +++++++++++++++++++++++++ tests/integration/schema/simple_test.go | 11 +++++++ tests/integration/test_case.go | 9 ++++++ tests/integration/utils2.go | 39 ++++++----------------- 4 files changed, 72 insertions(+), 29 deletions(-) diff --git a/tests/integration/results.go b/tests/integration/results.go index b4fc9d5948..61561a48e1 100644 --- a/tests/integration/results.go +++ b/tests/integration/results.go @@ -18,6 +18,9 @@ import ( "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" ) // AnyOf may be used as `Results` field where the value may @@ -184,3 +187,42 @@ func areResultArraysEqual[S any](expected []S, actual any) bool { } return true } + +func assertCollectionDescriptions( + s *state, + expected []client.CollectionDescription, + actual []client.CollectionDescription, +) { + require.Equal(s.t, len(expected), len(actual)) + + for i, expected := range expected { + actual := actual[i] + if expected.ID != 0 { + require.Equal(s.t, expected.ID, actual.ID) + } + if expected.RootID != 0 { 
+ require.Equal(s.t, expected.RootID, actual.RootID) + } + if expected.SchemaVersionID != "" { + require.Equal(s.t, expected.SchemaVersionID, actual.SchemaVersionID) + } + + require.Equal(s.t, expected.Name, actual.Name) + + if expected.Indexes != nil || len(actual.Indexes) != 0 { + // Dont bother asserting this if the expected is nil and the actual is nil/empty. + // This is to say each test action from having to bother declaring an empty slice (if there are no indexes) + require.Equal(s.t, expected.Indexes, actual.Indexes) + } + + if expected.Sources != nil || len(actual.Sources) != 0 { + // Dont bother asserting this if the expected is nil and the actual is nil/empty. + // This is to say each test action from having to bother declaring an empty slice (if there are no sources) + require.Equal(s.t, expected.Sources, actual.Sources) + } + + if expected.Fields != nil { + require.Equal(s.t, expected.Fields, actual.Fields) + } + } +} diff --git a/tests/integration/schema/simple_test.go b/tests/integration/schema/simple_test.go index 9e169e6178..dd7e8ce2cd 100644 --- a/tests/integration/schema/simple_test.go +++ b/tests/integration/schema/simple_test.go @@ -16,6 +16,7 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -28,6 +29,16 @@ func TestSchemaSimpleCreatesSchemaGivenEmptyType(t *testing.T) { Schema: ` type Users {} `, + ExpectedResults: []client.CollectionDescription{ + { + Name: immutable.Some("Users"), + Fields: []client.CollectionFieldDescription{ + { + Name: request.DocIDFieldName, + }, + }, + }, + }, }, testUtils.IntrospectionRequest{ Request: ` diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index 487641c5ec..4536c0cd0a 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -81,6 +81,15 @@ type SchemaUpdate struct { // The schema update. 
Schema string + // Optionally, the expected results. + // + // Each item will be compared individually, if ID, RootID, SchemaVersionID or Fields on the + // expected item are default they will not be compared with the actual. + // + // Assertions on Indexes and Sources will not distinguish between nil and empty (in order + // to allow their ommission in most cases). + ExpectedResults []client.CollectionDescription + // Any error expected from the action. Optional. // // String can be a partial, and the test will pass if an error is returned that diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index a1071de4aa..42ab28c04c 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -1000,10 +1000,14 @@ func updateSchema( action SchemaUpdate, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - _, err := node.AddSchema(s.ctx, action.Schema) + results, err := node.AddSchema(s.ctx, action.Schema) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) + + if action.ExpectedResults != nil { + assertCollectionDescriptions(s, action.ExpectedResults, results) + } } // If the schema was updated we need to refresh the collection definitions. 
@@ -1089,39 +1093,16 @@ func getCollections( txn := getTransaction(s, node, action.TransactionID, "") ctx := db.SetContextTxn(s.ctx, txn) results, err := node.GetCollections(ctx, action.FilterOptions) + resultDescriptions := make([]client.CollectionDescription, len(results)) + for i, col := range results { + resultDescriptions[i] = col.Description() + } expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) if !expectedErrorRaised { - require.Equal(s.t, len(action.ExpectedResults), len(results)) - - for i, expected := range action.ExpectedResults { - actual := results[i].Description() - if expected.ID != 0 { - require.Equal(s.t, expected.ID, actual.ID) - } - if expected.RootID != 0 { - require.Equal(s.t, expected.RootID, actual.RootID) - } - if expected.SchemaVersionID != "" { - require.Equal(s.t, expected.SchemaVersionID, actual.SchemaVersionID) - } - - require.Equal(s.t, expected.Name, actual.Name) - - if expected.Indexes != nil || len(actual.Indexes) != 0 { - // Dont bother asserting this if the expected is nil and the actual is nil/empty. - // This is to say each test action from having to bother declaring an empty slice (if there are no indexes) - require.Equal(s.t, expected.Indexes, actual.Indexes) - } - - if expected.Sources != nil || len(actual.Sources) != 0 { - // Dont bother asserting this if the expected is nil and the actual is nil/empty. 
- // This is to say each test action from having to bother declaring an empty slice (if there are no sources) - require.Equal(s.t, expected.Sources, actual.Sources) - } - } + assertCollectionDescriptions(s, action.ExpectedResults, resultDescriptions) } } } From 2aba842302a74b85408376b7bb4006b1e1be336d Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Fri, 28 Jun 2024 13:08:23 -0400 Subject: [PATCH 76/78] ci(i): Update go releaser (#2798) ## Relevant issue(s) Resolves #2797 ## Description This PR updates the Go Releaser to version 2. ## How has this been tested? Tested locally and through the release action on a fork. Specify the platform(s) on which this was tested: - MacOS --- .github/workflows/release.yml | 4 ++-- .goreleaser.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3351bfc242..2eee0900a9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -72,7 +72,7 @@ jobs: run: echo "sha_short=$(git rev-parse --short HEAD)" >> ${GITHUB_ENV} - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser-pro version: latest @@ -169,7 +169,7 @@ jobs: run: exit 1 - name: Do the release, only if all OS caches were restored - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser-pro version: latest diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 05f201200f..4abe0b198a 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,4 +1,4 @@ -version: 1 +version: 2 before: hooks: From e5ff02c6a68b5e0172fc532f04f0d73710765c7c Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Fri, 28 Jun 2024 13:28:23 -0400 Subject: [PATCH 77/78] fix: Allow primary field declarations on one-many (#2796) ## Relevant issue(s) Resolves #961 ## Description Allows primary field
declarations on one side of one-many. Declaring it as primary doesn't do anything, but there is no reason it shouldn't be permitted and it definitely shouldn't have errored the way it did. --- internal/request/graphql/schema/collection.go | 23 ++--- tests/integration/results.go | 4 +- tests/integration/schema/one_many_test.go | 85 +++++++++++++++++++ 3 files changed, 100 insertions(+), 12 deletions(-) create mode 100644 tests/integration/schema/one_many_test.go diff --git a/internal/request/graphql/schema/collection.go b/internal/request/graphql/schema/collection.go index 7c4e44593b..c4180be0f4 100644 --- a/internal/request/graphql/schema/collection.go +++ b/internal/request/graphql/schema/collection.go @@ -676,16 +676,19 @@ func finalizeRelations( } if !otherColFieldDescription.HasValue() || otherColFieldDescription.Value().Kind.Value().IsArray() { - // Relations only defined on one side of the object are possible, and so if this is one of them - // or if the other side is an array, we need to add the field to the schema (is primary side). - definition.Schema.Fields = append( - definition.Schema.Fields, - client.SchemaFieldDescription{ - Name: field.Name, - Kind: field.Kind.Value(), - Typ: cTypeByFieldNameByObjName[definition.Schema.Name][field.Name], - }, - ) + if _, exists := definition.Schema.GetFieldByName(field.Name); !exists { + // Relations only defined on one side of the object are possible, and so if this is one of them + // or if the other side is an array, we need to add the field to the schema (is primary side) + // if the field has not been explicitly declared by the user. 
+ definition.Schema.Fields = append( + definition.Schema.Fields, + client.SchemaFieldDescription{ + Name: field.Name, + Kind: field.Kind.Value(), + Typ: cTypeByFieldNameByObjName[definition.Schema.Name][field.Name], + }, + ) + } } otherIsEmbedded := len(otherColDefinition.Value().Description.Fields) == 0 diff --git a/tests/integration/results.go b/tests/integration/results.go index 61561a48e1..755608394d 100644 --- a/tests/integration/results.go +++ b/tests/integration/results.go @@ -211,13 +211,13 @@ func assertCollectionDescriptions( if expected.Indexes != nil || len(actual.Indexes) != 0 { // Dont bother asserting this if the expected is nil and the actual is nil/empty. - // This is to say each test action from having to bother declaring an empty slice (if there are no indexes) + // This is to save each test action from having to bother declaring an empty slice (if there are no indexes) require.Equal(s.t, expected.Indexes, actual.Indexes) } if expected.Sources != nil || len(actual.Sources) != 0 { // Dont bother asserting this if the expected is nil and the actual is nil/empty. - // This is to say each test action from having to bother declaring an empty slice (if there are no sources) + // This is to save each test action from having to bother declaring an empty slice (if there are no sources) require.Equal(s.t, expected.Sources, actual.Sources) } diff --git a/tests/integration/schema/one_many_test.go b/tests/integration/schema/one_many_test.go new file mode 100644 index 0000000000..bab84b3d40 --- /dev/null +++ b/tests/integration/schema/one_many_test.go @@ -0,0 +1,85 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package schema + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestSchemaOneMany_Primary(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + dogs: [Dog] + } + type Dog { + name: String + owner: User @primary + } + `, + ExpectedResults: []client.CollectionDescription{ + { + Name: immutable.Some("User"), + Fields: []client.CollectionFieldDescription{ + { + Name: "_docID", + }, + { + Name: "dogs", + ID: 1, + Kind: immutable.Some[client.FieldKind](client.ObjectArrayKind("Dog")), + RelationName: immutable.Some("dog_user"), + }, + { + Name: "name", + ID: 2, + }, + }, + }, + { + Name: immutable.Some("Dog"), + Fields: []client.CollectionFieldDescription{ + { + Name: "_docID", + }, + { + Name: "name", + ID: 1, + }, + { + Name: "owner", + ID: 2, + Kind: immutable.Some[client.FieldKind](client.ObjectKind("User")), + RelationName: immutable.Some("dog_user"), + }, + { + Name: "owner_id", + ID: 3, + Kind: immutable.Some[client.FieldKind](client.ScalarKind(client.FieldKind_DocID)), + RelationName: immutable.Some("dog_user"), + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} From 87a20abddae7f513286bc9cea3a810b382d85677 Mon Sep 17 00:00:00 2001 From: Fred Carle Date: Fri, 28 Jun 2024 12:32:15 -0400 Subject: [PATCH 78/78] Release v0.12.0 --- CHANGELOG copy.md | 1170 +++++++++++++++++++++++++++++++++++++++++++++ CHANGELOG.md | 86 ++++ licenses/BSL.txt | 2 +- 3 files changed, 1257 insertions(+), 1 deletion(-) create mode 100644 CHANGELOG copy.md diff --git a/CHANGELOG copy.md b/CHANGELOG copy.md new file mode 100644 index 0000000000..7345a58cc8 --- /dev/null +++ b/CHANGELOG copy.md @@ -0,0 +1,1170 @@ + + +## [v0.11.0](https://github.com/sourcenetwork/defradb/compare/v0.10.0...v0.11.0) + +> 2024-05-03 + +DefraDB v0.11 is a major 
pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.10.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Update corelog to 0.0.7 ([#2547](https://github.com/sourcenetwork/defradb/issues/2547)) +* Move relation field properties onto collection ([#2529](https://github.com/sourcenetwork/defradb/issues/2529)) +* Lens runtime config ([#2497](https://github.com/sourcenetwork/defradb/issues/2497)) +* Add P Counter CRDT ([#2482](https://github.com/sourcenetwork/defradb/issues/2482)) +* Add Access Control Policy ([#2338](https://github.com/sourcenetwork/defradb/issues/2338)) +* Force explicit primary decl. 
in SDL for one-ones ([#2462](https://github.com/sourcenetwork/defradb/issues/2462)) +* Allow mutation of col sources via PatchCollection ([#2424](https://github.com/sourcenetwork/defradb/issues/2424)) +* Add Defra-Lens support for branching schema ([#2421](https://github.com/sourcenetwork/defradb/issues/2421)) +* Add PatchCollection ([#2402](https://github.com/sourcenetwork/defradb/issues/2402)) + +### Fixes + +* Return correct results from one-many indexed filter ([#2579](https://github.com/sourcenetwork/defradb/issues/2579)) +* Handle compound filters on related indexed fields ([#2575](https://github.com/sourcenetwork/defradb/issues/2575)) +* Add check to filter result for logical ops ([#2573](https://github.com/sourcenetwork/defradb/issues/2573)) +* Make all array kinds nillable ([#2534](https://github.com/sourcenetwork/defradb/issues/2534)) +* Allow update when updating non-indexed field ([#2511](https://github.com/sourcenetwork/defradb/issues/2511)) + +### Documentation + +* Add data definition document ([#2544](https://github.com/sourcenetwork/defradb/issues/2544)) + +### Refactoring + +* Merge collection UpdateWith and DeleteWith ([#2531](https://github.com/sourcenetwork/defradb/issues/2531)) +* DB transactions context ([#2513](https://github.com/sourcenetwork/defradb/issues/2513)) +* Add NormalValue ([#2404](https://github.com/sourcenetwork/defradb/issues/2404)) +* Clean up client/request package ([#2443](https://github.com/sourcenetwork/defradb/issues/2443)) +* Rewrite convertImmutable ([#2445](https://github.com/sourcenetwork/defradb/issues/2445)) +* Unify Field Kind and Schema properties ([#2414](https://github.com/sourcenetwork/defradb/issues/2414)) +* Replace logging package with corelog ([#2406](https://github.com/sourcenetwork/defradb/issues/2406)) + +### Testing + +* Add flag to skip network tests ([#2495](https://github.com/sourcenetwork/defradb/issues/2495)) + +### Bot + +* Update dependencies (bulk dependabot PRs) 30-04-2024 
([#2570](https://github.com/sourcenetwork/defradb/issues/2570)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.7.0 to 7.7.1 in /playground ([#2550](https://github.com/sourcenetwork/defradb/issues/2550)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.7.0 to 7.7.1 in /playground ([#2551](https://github.com/sourcenetwork/defradb/issues/2551)) +* Bump swagger-ui-react from 5.16.2 to 5.17.0 in /playground ([#2549](https://github.com/sourcenetwork/defradb/issues/2549)) +* Update dependencies (bulk dependabot PRs) 23-04-2023 ([#2548](https://github.com/sourcenetwork/defradb/issues/2548)) +* Bump go.opentelemetry.io/otel/sdk/metric from 1.24.0 to 1.25.0 ([#2499](https://github.com/sourcenetwork/defradb/issues/2499)) +* Bump typescript from 5.4.3 to 5.4.5 in /playground ([#2515](https://github.com/sourcenetwork/defradb/issues/2515)) +* Bump swagger-ui-react from 5.14.0 to 5.15.0 in /playground ([#2514](https://github.com/sourcenetwork/defradb/issues/2514)) +* Update dependencies (bulk dependabot PRs) 2024-04-09 ([#2509](https://github.com/sourcenetwork/defradb/issues/2509)) +* Update dependencies (bulk dependabot PRs) 2024-04-03 ([#2492](https://github.com/sourcenetwork/defradb/issues/2492)) +* Update dependencies (bulk dependabot PRs) 03-04-2024 ([#2486](https://github.com/sourcenetwork/defradb/issues/2486)) +* Bump github.com/multiformats/go-multiaddr from 0.12.2 to 0.12.3 ([#2480](https://github.com/sourcenetwork/defradb/issues/2480)) +* Bump [@types](https://github.com/types)/react from 18.2.66 to 18.2.67 in /playground ([#2427](https://github.com/sourcenetwork/defradb/issues/2427)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.2.0 to 7.3.1 in /playground ([#2428](https://github.com/sourcenetwork/defradb/issues/2428)) +* Update dependencies (bulk dependabot PRs) 19-03-2024 ([#2426](https://github.com/sourcenetwork/defradb/issues/2426)) +* Update dependencies (bulk dependabot PRs) 
03-11-2024 ([#2399](https://github.com/sourcenetwork/defradb/issues/2399)) + + +## [v0.10.0](https://github.com/sourcenetwork/defradb/compare/v0.9.0...v0.10.0) + +> 2024-03-08 + +DefraDB v0.10 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.9.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Add case insensitive `like` operator ([#2368](https://github.com/sourcenetwork/defradb/issues/2368)) +* Reverted order for indexed fields ([#2335](https://github.com/sourcenetwork/defradb/issues/2335)) +* Rework GetCollection/SchemaByFoo funcs into single ([#2319](https://github.com/sourcenetwork/defradb/issues/2319)) +* Add support for views with Lens transforms ([#2311](https://github.com/sourcenetwork/defradb/issues/2311)) +* Model Col. 
SchemaVersions and migrations on Cols ([#2286](https://github.com/sourcenetwork/defradb/issues/2286)) +* Replace FieldDescription.RelationType with IsPrimary ([#2288](https://github.com/sourcenetwork/defradb/issues/2288)) +* Multiple docs with nil value on unique-indexed field ([#2276](https://github.com/sourcenetwork/defradb/issues/2276)) +* Allow setting null values on doc fields ([#2273](https://github.com/sourcenetwork/defradb/issues/2273)) +* Add JSON scalar ([#2254](https://github.com/sourcenetwork/defradb/issues/2254)) +* Generate OpenAPI command ([#2235](https://github.com/sourcenetwork/defradb/issues/2235)) +* Add composite indexes ([#2226](https://github.com/sourcenetwork/defradb/issues/2226)) + +### Fixes + +* Add `latest` image tag for ghcr ([#2340](https://github.com/sourcenetwork/defradb/issues/2340)) +* Move field id off of schema ([#2336](https://github.com/sourcenetwork/defradb/issues/2336)) +* Make returned collections respect explicit transactions ([#2385](https://github.com/sourcenetwork/defradb/issues/2385)) +* Update GetCollections behaviour ([#2378](https://github.com/sourcenetwork/defradb/issues/2378)) +* Add missing directive definitions ([#2369](https://github.com/sourcenetwork/defradb/issues/2369)) +* Add validation to JSON fields ([#2375](https://github.com/sourcenetwork/defradb/issues/2375)) +* Make peers sync secondary index ([#2390](https://github.com/sourcenetwork/defradb/issues/2390)) +* Load root dir before loading config ([#2266](https://github.com/sourcenetwork/defradb/issues/2266)) +* Mark docs as deleted when querying in delete mut ([#2298](https://github.com/sourcenetwork/defradb/issues/2298)) +* Add missing logs at startup ([#2391](https://github.com/sourcenetwork/defradb/issues/2391)) +* Add missing delta payload ([#2306](https://github.com/sourcenetwork/defradb/issues/2306)) +* Fix compound relational filters in aggregates ([#2297](https://github.com/sourcenetwork/defradb/issues/2297)) + +### Refactoring + +* Generate field 
ids using a sequence ([#2339](https://github.com/sourcenetwork/defradb/issues/2339)) +* Make config internal to CLI ([#2310](https://github.com/sourcenetwork/defradb/issues/2310)) +* Node config ([#2296](https://github.com/sourcenetwork/defradb/issues/2296)) +* HTTP config ([#2278](https://github.com/sourcenetwork/defradb/issues/2278)) +* Remove unused Delete field from client.Document ([#2275](https://github.com/sourcenetwork/defradb/issues/2275)) +* Decouple net config ([#2258](https://github.com/sourcenetwork/defradb/issues/2258)) +* Make CollectionDescription.Name Option ([#2223](https://github.com/sourcenetwork/defradb/issues/2223)) + +### Chore + +* Bump to GoLang v1.21 ([#2195](https://github.com/sourcenetwork/defradb/issues/2195)) + +### Bot + +* Update dependencies (bulk dependabot PRs) 05-02-2024 ([#2372](https://github.com/sourcenetwork/defradb/issues/2372)) +* Update dependencies (bulk dependabot PRs) 02-27-2024 ([#2353](https://github.com/sourcenetwork/defradb/issues/2353)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.21.0 to 7.0.1 in /playground ([#2331](https://github.com/sourcenetwork/defradb/issues/2331)) +* Bump google.golang.org/grpc from 1.61.0 to 1.61.1 ([#2320](https://github.com/sourcenetwork/defradb/issues/2320)) +* Update dependencies (bulk dependabot PRs) 2024-02-19 ([#2330](https://github.com/sourcenetwork/defradb/issues/2330)) +* Bump vite from 5.1.1 to 5.1.2 in /playground ([#2317](https://github.com/sourcenetwork/defradb/issues/2317)) +* Bump golang.org/x/net from 0.20.0 to 0.21.0 ([#2301](https://github.com/sourcenetwork/defradb/issues/2301)) +* Update dependencies (bulk dependabot PRs) 2023-02-14 ([#2313](https://github.com/sourcenetwork/defradb/issues/2313)) +* Update dependencies (bulk dependabot PRs) 02-07-2024 ([#2294](https://github.com/sourcenetwork/defradb/issues/2294)) +* Update dependencies (bulk dependabot PRs) 30-01-2024 ([#2270](https://github.com/sourcenetwork/defradb/issues/2270)) +* 
Update dependencies (bulk dependabot PRs) 23-01-2024 ([#2252](https://github.com/sourcenetwork/defradb/issues/2252)) +* Bump vite from 5.0.11 to 5.0.12 in /playground ([#2236](https://github.com/sourcenetwork/defradb/issues/2236)) +* Bump github.com/evanphx/json-patch/v5 from 5.7.0 to 5.8.1 ([#2233](https://github.com/sourcenetwork/defradb/issues/2233)) + + +## [v0.9.0](https://github.com/sourcenetwork/defradb/compare/v0.8.0...v0.9.0) + +> 2024-01-18 + +DefraDB v0.9 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.8.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. 
+ +### Features + +* Mutation typed input ([#2167](https://github.com/sourcenetwork/defradb/issues/2167)) +* Add PN Counter CRDT type ([#2119](https://github.com/sourcenetwork/defradb/issues/2119)) +* Allow users to add Views ([#2114](https://github.com/sourcenetwork/defradb/issues/2114)) +* Add unique secondary index ([#2131](https://github.com/sourcenetwork/defradb/issues/2131)) +* New cmd for docs auto generation ([#2096](https://github.com/sourcenetwork/defradb/issues/2096)) +* Add blob scalar type ([#2091](https://github.com/sourcenetwork/defradb/issues/2091)) + +### Fixes + +* Add entropy to counter CRDT type updates ([#2186](https://github.com/sourcenetwork/defradb/issues/2186)) +* Handle multiple nil values on unique indexed fields ([#2178](https://github.com/sourcenetwork/defradb/issues/2178)) +* Filtering on unique index if there is no match ([#2177](https://github.com/sourcenetwork/defradb/issues/2177)) + +### Performance + +* Switch LensVM to wasmtime runtime ([#2030](https://github.com/sourcenetwork/defradb/issues/2030)) + +### Refactoring + +* Add strong typing to document creation ([#2161](https://github.com/sourcenetwork/defradb/issues/2161)) +* Rename key,id,dockey to docID terminology ([#1749](https://github.com/sourcenetwork/defradb/issues/1749)) +* Simplify Merkle CRDT workflow ([#2111](https://github.com/sourcenetwork/defradb/issues/2111)) + +### Testing + +* Add auto-doc generation ([#2051](https://github.com/sourcenetwork/defradb/issues/2051)) + +### Continuous integration + +* Add windows test runner ([#2033](https://github.com/sourcenetwork/defradb/issues/2033)) + +### Chore + +* Update Lens to v0.5 ([#2083](https://github.com/sourcenetwork/defradb/issues/2083)) + +### Bot + +* Bump [@types](https://github.com/types)/react from 18.2.47 to 18.2.48 in /playground ([#2213](https://github.com/sourcenetwork/defradb/issues/2213)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.18.0 to 6.18.1 in /playground 
([#2215](https://github.com/sourcenetwork/defradb/issues/2215)) +* Update dependencies (bulk dependabot PRs) 15-01-2024 ([#2217](https://github.com/sourcenetwork/defradb/issues/2217)) +* Bump follow-redirects from 1.15.3 to 1.15.4 in /playground ([#2181](https://github.com/sourcenetwork/defradb/issues/2181)) +* Bump github.com/getkin/kin-openapi from 0.120.0 to 0.122.0 ([#2097](https://github.com/sourcenetwork/defradb/issues/2097)) +* Update dependencies (bulk dependabot PRs) 08-01-2024 ([#2173](https://github.com/sourcenetwork/defradb/issues/2173)) +* Bump github.com/bits-and-blooms/bitset from 1.12.0 to 1.13.0 ([#2160](https://github.com/sourcenetwork/defradb/issues/2160)) +* Bump [@types](https://github.com/types)/react from 18.2.45 to 18.2.46 in /playground ([#2159](https://github.com/sourcenetwork/defradb/issues/2159)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.15.0 to 6.16.0 in /playground ([#2156](https://github.com/sourcenetwork/defradb/issues/2156)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.15.0 to 6.16.0 in /playground ([#2155](https://github.com/sourcenetwork/defradb/issues/2155)) +* Update dependencies (bulk dependabot PRs) 27-12-2023 ([#2154](https://github.com/sourcenetwork/defradb/issues/2154)) +* Bump github.com/spf13/viper from 1.17.0 to 1.18.2 ([#2145](https://github.com/sourcenetwork/defradb/issues/2145)) +* Bump golang.org/x/crypto from 0.16.0 to 0.17.0 ([#2144](https://github.com/sourcenetwork/defradb/issues/2144)) +* Update dependencies (bulk dependabot PRs) 18-12-2023 ([#2142](https://github.com/sourcenetwork/defradb/issues/2142)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.13.2 to 6.14.0 in /playground ([#2136](https://github.com/sourcenetwork/defradb/issues/2136)) +* Bump [@types](https://github.com/types)/react from 18.2.43 to 18.2.45 in /playground ([#2134](https://github.com/sourcenetwork/defradb/issues/2134)) +* Bump vite from 5.0.7 to 
5.0.10 in /playground ([#2135](https://github.com/sourcenetwork/defradb/issues/2135)) +* Update dependencies (bulk dependabot PRs) 04-12-2023 ([#2133](https://github.com/sourcenetwork/defradb/issues/2133)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.13.1 to 6.13.2 in /playground ([#2109](https://github.com/sourcenetwork/defradb/issues/2109)) +* Bump vite from 5.0.2 to 5.0.5 in /playground ([#2112](https://github.com/sourcenetwork/defradb/issues/2112)) +* Bump [@types](https://github.com/types)/react from 18.2.41 to 18.2.42 in /playground ([#2108](https://github.com/sourcenetwork/defradb/issues/2108)) +* Update dependencies (bulk dependabot PRs) 04-12-2023 ([#2107](https://github.com/sourcenetwork/defradb/issues/2107)) +* Bump [@types](https://github.com/types)/react from 18.2.38 to 18.2.39 in /playground ([#2086](https://github.com/sourcenetwork/defradb/issues/2086)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.12.0 to 6.13.0 in /playground ([#2085](https://github.com/sourcenetwork/defradb/issues/2085)) +* Update dependencies (bulk dependabot PRs) 27-11-2023 ([#2081](https://github.com/sourcenetwork/defradb/issues/2081)) +* Bump swagger-ui-react from 5.10.0 to 5.10.3 in /playground ([#2067](https://github.com/sourcenetwork/defradb/issues/2067)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.11.0 to 6.12.0 in /playground ([#2068](https://github.com/sourcenetwork/defradb/issues/2068)) +* Update dependencies (bulk dependabot PRs) 20-11-2023 ([#2066](https://github.com/sourcenetwork/defradb/issues/2066)) + + +## [v0.8.0](https://github.com/sourcenetwork/defradb/compare/v0.7.0...v0.8.0) + +> 2023-11-14 + +DefraDB v0.8 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. 
+ +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.7.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Add means to fetch schema ([#2006](https://github.com/sourcenetwork/defradb/issues/2006)) +* Rename Schema.SchemaID to Schema.Root ([#2005](https://github.com/sourcenetwork/defradb/issues/2005)) +* Enable playground in Docker build ([#1986](https://github.com/sourcenetwork/defradb/issues/1986)) +* Change GetCollectionBySchemaFoo funcs to return many ([#1984](https://github.com/sourcenetwork/defradb/issues/1984)) +* Add Swagger UI to playground ([#1979](https://github.com/sourcenetwork/defradb/issues/1979)) +* Add OpenAPI route ([#1960](https://github.com/sourcenetwork/defradb/issues/1960)) +* Remove CollectionDescription.Schema ([#1965](https://github.com/sourcenetwork/defradb/issues/1965)) +* Remove collection from patch schema ([#1957](https://github.com/sourcenetwork/defradb/issues/1957)) +* Make queries utilise secondary indexes ([#1925](https://github.com/sourcenetwork/defradb/issues/1925)) +* Allow setting of default schema version ([#1888](https://github.com/sourcenetwork/defradb/issues/1888)) +* Add CCIP Support ([#1896](https://github.com/sourcenetwork/defradb/issues/1896)) + +### Fixes + +* Fix test module relying on closed memory leak ([#2037](https://github.com/sourcenetwork/defradb/issues/2037)) +* Make return type for FieldKind_INT an int64 ([#1982](https://github.com/sourcenetwork/defradb/issues/1982)) +* Node private key requires data directory ([#1938](https://github.com/sourcenetwork/defradb/issues/1938)) +* Remove collection name from schema ID generation ([#1920](https://github.com/sourcenetwork/defradb/issues/1920)) +* Infinite loop when updating one-one relation 
([#1915](https://github.com/sourcenetwork/defradb/issues/1915)) + +### Refactoring + +* CRDT merge direction ([#2016](https://github.com/sourcenetwork/defradb/issues/2016)) +* Reorganise collection description storage ([#1988](https://github.com/sourcenetwork/defradb/issues/1988)) +* Add peerstore to multistore ([#1980](https://github.com/sourcenetwork/defradb/issues/1980)) +* P2P client interface ([#1924](https://github.com/sourcenetwork/defradb/issues/1924)) +* Deprecate CollectionDescription.Schema ([#1939](https://github.com/sourcenetwork/defradb/issues/1939)) +* Remove net GRPC API ([#1927](https://github.com/sourcenetwork/defradb/issues/1927)) +* CLI client interface ([#1839](https://github.com/sourcenetwork/defradb/issues/1839)) + +### Continuous integration + +* Add goreleaser workflow ([#2040](https://github.com/sourcenetwork/defradb/issues/2040)) +* Add mac test runner ([#2035](https://github.com/sourcenetwork/defradb/issues/2035)) +* Parallelize change detector ([#1871](https://github.com/sourcenetwork/defradb/issues/1871)) + +### Chore + +* Update dependencies ([#2044](https://github.com/sourcenetwork/defradb/issues/2044)) + +### Bot + +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.10.0 to 6.11.0 in /playground ([#2053](https://github.com/sourcenetwork/defradb/issues/2053)) +* Update dependencies (bulk dependabot PRs) 13-11-2023 ([#2052](https://github.com/sourcenetwork/defradb/issues/2052)) +* Bump axios from 1.5.1 to 1.6.1 in /playground ([#2041](https://github.com/sourcenetwork/defradb/issues/2041)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.9.1 to 6.10.0 in /playground ([#2042](https://github.com/sourcenetwork/defradb/issues/2042)) +* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.4.0 to 3.4.1 in /playground ([#2022](https://github.com/sourcenetwork/defradb/issues/2022)) +* Update dependencies (bulk dependabot PRs) 08-11-2023 
([#2038](https://github.com/sourcenetwork/defradb/issues/2038)) +* Update dependencies (bulk dependabot PRs) 30-10-2023 ([#2015](https://github.com/sourcenetwork/defradb/issues/2015)) +* Bump eslint-plugin and parser from 6.8.0 to 6.9.0 in /playground ([#2000](https://github.com/sourcenetwork/defradb/issues/2000)) +* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1998](https://github.com/sourcenetwork/defradb/issues/1998)) +* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1976](https://github.com/sourcenetwork/defradb/issues/1976)) +* Bump golang.org/x/net from 0.16.0 to 0.17.0 ([#1961](https://github.com/sourcenetwork/defradb/issues/1961)) +* Bump [@types](https://github.com/types)/react-dom from 18.2.11 to 18.2.12 in /playground ([#1952](https://github.com/sourcenetwork/defradb/issues/1952)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.7.4 to 6.7.5 in /playground ([#1953](https://github.com/sourcenetwork/defradb/issues/1953)) +* Bump combined dependencies 09-10-2023 ([#1951](https://github.com/sourcenetwork/defradb/issues/1951)) +* Bump [@types](https://github.com/types)/react from 18.2.24 to 18.2.25 in /playground ([#1932](https://github.com/sourcenetwork/defradb/issues/1932)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.7.3 to 6.7.4 in /playground ([#1933](https://github.com/sourcenetwork/defradb/issues/1933)) +* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.3.2 to 3.4.0 in /playground ([#1904](https://github.com/sourcenetwork/defradb/issues/1904)) +* Bump combined dependencies 19-09-2023 ([#1931](https://github.com/sourcenetwork/defradb/issues/1931)) +* Bump graphql from 16.8.0 to 16.8.1 in /playground ([#1901](https://github.com/sourcenetwork/defradb/issues/1901)) +* Update combined dependabot PRs 19-09-2023 ([#1898](https://github.com/sourcenetwork/defradb/issues/1898)) + + +## [v0.7.0](https://github.com/sourcenetwork/defradb/compare/v0.6.0...v0.7.0) + +> 
2023-09-18 + +DefraDB v0.7 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +This release has focused on robustness, testing, and schema management. Some highlight new features include notable expansions to the expressiveness of schema migrations. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.6.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Allow field indexing by name in PatchSchema ([#1810](https://github.com/sourcenetwork/defradb/issues/1810)) +* Auto-create relation id fields via PatchSchema ([#1807](https://github.com/sourcenetwork/defradb/issues/1807)) +* Support PatchSchema relational field kind substitution ([#1777](https://github.com/sourcenetwork/defradb/issues/1777)) +* Add support for adding of relational fields ([#1766](https://github.com/sourcenetwork/defradb/issues/1766)) +* Enable downgrading of documents via Lens inverses ([#1721](https://github.com/sourcenetwork/defradb/issues/1721)) + +### Fixes + +* Correctly handle serialisation of nil field values ([#1872](https://github.com/sourcenetwork/defradb/issues/1872)) +* Compound filter operators with relations ([#1855](https://github.com/sourcenetwork/defradb/issues/1855)) +* Only update updated fields via update requests ([#1817](https://github.com/sourcenetwork/defradb/issues/1817)) +* Error when saving a deleted document ([#1806](https://github.com/sourcenetwork/defradb/issues/1806)) +* Prevent multiple docs from being linked in one one ([#1790](https://github.com/sourcenetwork/defradb/issues/1790)) +* Handle the querying of secondary relation id fields 
([#1768](https://github.com/sourcenetwork/defradb/issues/1768)) +* Improve the way migrations handle transactions ([#1737](https://github.com/sourcenetwork/defradb/issues/1737)) + +### Tooling + +* Add Akash deployment configuration ([#1736](https://github.com/sourcenetwork/defradb/issues/1736)) + +### Refactoring + +* HTTP client interface ([#1776](https://github.com/sourcenetwork/defradb/issues/1776)) +* Simplify fetcher interface ([#1746](https://github.com/sourcenetwork/defradb/issues/1746)) + +### Testing + +* Convert and move out of place explain tests ([#1878](https://github.com/sourcenetwork/defradb/issues/1878)) +* Update mutation tests to make use of mutation system ([#1853](https://github.com/sourcenetwork/defradb/issues/1853)) +* Test top level agg. with compound relational filter ([#1870](https://github.com/sourcenetwork/defradb/issues/1870)) +* Skip unsupported mutation types at test level ([#1850](https://github.com/sourcenetwork/defradb/issues/1850)) +* Extend mutation tests with col.Update and Create ([#1838](https://github.com/sourcenetwork/defradb/issues/1838)) +* Add tests for multiple one-one joins ([#1793](https://github.com/sourcenetwork/defradb/issues/1793)) + +### Chore + +* Update Badger version to v4 ([#1740](https://github.com/sourcenetwork/defradb/issues/1740)) +* Update go-libp2p to 0.29.2 ([#1780](https://github.com/sourcenetwork/defradb/issues/1780)) +* Bump golangci-lint to v1.54 ([#1881](https://github.com/sourcenetwork/defradb/issues/1881)) +* Bump go.opentelemetry.io/otel/metric from 1.17.0 to 1.18.0 ([#1890](https://github.com/sourcenetwork/defradb/issues/1890)) +* Bump [@tanstack](https://github.com/tanstack)/react-query from 4.35.0 to 4.35.3 in /playground ([#1876](https://github.com/sourcenetwork/defradb/issues/1876)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.5.0 to 6.7.0 in /playground ([#1874](https://github.com/sourcenetwork/defradb/issues/1874)) +* Bump 
[@typescript](https://github.com/typescript)-eslint/parser from 6.6.0 to 6.7.0 in /playground ([#1875](https://github.com/sourcenetwork/defradb/issues/1875)) +* Combined PRs 2023-09-14 ([#1873](https://github.com/sourcenetwork/defradb/issues/1873)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.4.0 to 6.5.0 in /playground ([#1827](https://github.com/sourcenetwork/defradb/issues/1827)) +* Bump go.opentelemetry.io/otel/sdk/metric from 0.39.0 to 0.40.0 ([#1829](https://github.com/sourcenetwork/defradb/issues/1829)) +* Bump github.com/ipfs/go-block-format from 0.1.2 to 0.2.0 ([#1819](https://github.com/sourcenetwork/defradb/issues/1819)) +* Combined PRs ([#1826](https://github.com/sourcenetwork/defradb/issues/1826)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.4.0 to 6.4.1 in /playground ([#1804](https://github.com/sourcenetwork/defradb/issues/1804)) +* Combined PRs ([#1803](https://github.com/sourcenetwork/defradb/issues/1803)) +* Combined PRs ([#1791](https://github.com/sourcenetwork/defradb/issues/1791)) +* Combined PRs ([#1778](https://github.com/sourcenetwork/defradb/issues/1778)) +* Bump dependencies ([#1761](https://github.com/sourcenetwork/defradb/issues/1761)) +* Bump vite from 4.3.9 to 4.4.8 in /playground ([#1748](https://github.com/sourcenetwork/defradb/issues/1748)) +* Bump graphiql from 3.0.4 to 3.0.5 in /playground ([#1730](https://github.com/sourcenetwork/defradb/issues/1730)) +* Combined bumps of dependencies under /playground ([#1744](https://github.com/sourcenetwork/defradb/issues/1744)) +* Bump github.com/ipfs/boxo from 0.10.2 to 0.11.0 ([#1726](https://github.com/sourcenetwork/defradb/issues/1726)) +* Bump github.com/libp2p/go-libp2p-kad-dht from 0.24.2 to 0.24.3 ([#1724](https://github.com/sourcenetwork/defradb/issues/1724)) +* Bump google.golang.org/grpc from 1.56.2 to 1.57.0 ([#1725](https://github.com/sourcenetwork/defradb/issues/1725)) + + +## 
[v0.6.0](https://github.com/sourcenetwork/defradb/compare/v0.5.1...v0.6.0) + +> 2023-07-31 + +DefraDB v0.6 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are several new and powerful features, important bug fixes, and notable refactors in this release. Some highlight features include: The initial release of our LensVM based schema migration engine powered by WebAssembly ([#1650](https://github.com/sourcenetwork/defradb/issues/1650)), newly embedded DefraDB Playground which includes a bundled GraphQL client and schema manager, and last but not least a relation field (_id) alias to improve the developer experience ([#1609](https://github.com/sourcenetwork/defradb/issues/1609)). + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.5.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. 
+ +### Features + +* Add `_not` operator ([#1631](https://github.com/sourcenetwork/defradb/issues/1631)) +* Schema list API ([#1625](https://github.com/sourcenetwork/defradb/issues/1625)) +* Add simple data import and export ([#1630](https://github.com/sourcenetwork/defradb/issues/1630)) +* Playground ([#1575](https://github.com/sourcenetwork/defradb/issues/1575)) +* Add schema migration get and set cmds to CLI ([#1650](https://github.com/sourcenetwork/defradb/issues/1650)) +* Allow relation alias on create and update ([#1609](https://github.com/sourcenetwork/defradb/issues/1609)) +* Make fetcher calculate docFetches and fieldFetches ([#1713](https://github.com/sourcenetwork/defradb/issues/1713)) +* Add lens migration engine to defra ([#1564](https://github.com/sourcenetwork/defradb/issues/1564)) +* Add `_keys` attribute to `selectNode` simple explain ([#1546](https://github.com/sourcenetwork/defradb/issues/1546)) +* CLI commands for secondary indexes ([#1595](https://github.com/sourcenetwork/defradb/issues/1595)) +* Add alias to `groupBy` related object ([#1579](https://github.com/sourcenetwork/defradb/issues/1579)) +* Non-unique secondary index (no querying) ([#1450](https://github.com/sourcenetwork/defradb/issues/1450)) +* Add ability to explain-debug all nodes ([#1563](https://github.com/sourcenetwork/defradb/issues/1563)) +* Include dockey in doc exists err ([#1558](https://github.com/sourcenetwork/defradb/issues/1558)) + +### Fixes + +* Better wait in CLI integration test ([#1415](https://github.com/sourcenetwork/defradb/issues/1415)) +* Return error when relation is not defined on both types ([#1647](https://github.com/sourcenetwork/defradb/issues/1647)) +* Change `core.DocumentMapping` to pointer ([#1528](https://github.com/sourcenetwork/defradb/issues/1528)) +* Fix invalid (badger) datastore state ([#1685](https://github.com/sourcenetwork/defradb/issues/1685)) +* Discard index and subscription implicit transactions 
([#1715](https://github.com/sourcenetwork/defradb/issues/1715)) +* Remove duplicated `peers` in peerstore prefix ([#1678](https://github.com/sourcenetwork/defradb/issues/1678)) +* Return errors from typeJoinOne ([#1716](https://github.com/sourcenetwork/defradb/issues/1716)) +* Document change detector breaking change ([#1531](https://github.com/sourcenetwork/defradb/issues/1531)) +* Standardise `schema migration` CLI errors ([#1682](https://github.com/sourcenetwork/defradb/issues/1682)) +* Introspection OrderArg returns null inputFields ([#1633](https://github.com/sourcenetwork/defradb/issues/1633)) +* Avoid duplicated requestable fields ([#1621](https://github.com/sourcenetwork/defradb/issues/1621)) +* Normalize int field kind ([#1619](https://github.com/sourcenetwork/defradb/issues/1619)) +* Change the WriteSyncer to use lock when piping ([#1608](https://github.com/sourcenetwork/defradb/issues/1608)) +* Filter splitting and rendering for related types ([#1541](https://github.com/sourcenetwork/defradb/issues/1541)) + +### Documentation + +* Improve CLI command documentation ([#1505](https://github.com/sourcenetwork/defradb/issues/1505)) + +### Refactoring + +* Schema list output to include schemaVersionID ([#1706](https://github.com/sourcenetwork/defradb/issues/1706)) +* Reuse lens wasm modules ([#1641](https://github.com/sourcenetwork/defradb/issues/1641)) +* Remove redundant txn param from fetcher start ([#1635](https://github.com/sourcenetwork/defradb/issues/1635)) +* Remove first CRDT byte from field encoded values ([#1622](https://github.com/sourcenetwork/defradb/issues/1622)) +* Merge `node` into `net` and improve coverage ([#1593](https://github.com/sourcenetwork/defradb/issues/1593)) +* Fetcher filter and field optimization ([#1500](https://github.com/sourcenetwork/defradb/issues/1500)) + +### Testing + +* Rework transaction test framework capabilities ([#1603](https://github.com/sourcenetwork/defradb/issues/1603)) +* Expand backup integration tests 
([#1699](https://github.com/sourcenetwork/defradb/issues/1699)) +* Disable test ([#1675](https://github.com/sourcenetwork/defradb/issues/1675)) +* Add tests for 1-1 group by id ([#1655](https://github.com/sourcenetwork/defradb/issues/1655)) +* Remove CLI tests from make test ([#1643](https://github.com/sourcenetwork/defradb/issues/1643)) +* Bundle test state into single var ([#1645](https://github.com/sourcenetwork/defradb/issues/1645)) +* Convert explain group tests to new explain setup ([#1537](https://github.com/sourcenetwork/defradb/issues/1537)) +* Add tests for foo_id field name clashes ([#1521](https://github.com/sourcenetwork/defradb/issues/1521)) +* Resume wait correctly following test node restart ([#1515](https://github.com/sourcenetwork/defradb/issues/1515)) +* Require no errors when none expected ([#1509](https://github.com/sourcenetwork/defradb/issues/1509)) + +### Continuous integration + +* Add workflows to push, pull, and validate docker images ([#1676](https://github.com/sourcenetwork/defradb/issues/1676)) +* Build mocks using make ([#1612](https://github.com/sourcenetwork/defradb/issues/1612)) +* Fix terraform plan and merge AMI build + deploy workflow ([#1514](https://github.com/sourcenetwork/defradb/issues/1514)) +* Reconfigure CodeCov action to ensure stability ([#1414](https://github.com/sourcenetwork/defradb/issues/1414)) + +### Chore + +* Bump to GoLang v1.20 ([#1689](https://github.com/sourcenetwork/defradb/issues/1689)) +* Update to ipfs boxo 0.10.0 ([#1573](https://github.com/sourcenetwork/defradb/issues/1573)) + + + +## [v0.5.1](https://github.com/sourcenetwork/defradb/compare/v0.5.0...v0.5.1) + +> 2023-05-16 + +### Features + +* Add collection response information on creation ([#1499](https://github.com/sourcenetwork/defradb/issues/1499)) +* CLI client request from file ([#1503](https://github.com/sourcenetwork/defradb/issues/1503)) +* Add commits fieldName and fieldId fields 
([#1451](https://github.com/sourcenetwork/defradb/issues/1451)) +* Add allowed origins config ([#1408](https://github.com/sourcenetwork/defradb/issues/1408)) +* Add descriptions to all system defined GQL stuff ([#1387](https://github.com/sourcenetwork/defradb/issues/1387)) +* Strongly type Request.Errors ([#1364](https://github.com/sourcenetwork/defradb/issues/1364)) + +### Fixes + +* Skip new test packages in change detector ([#1495](https://github.com/sourcenetwork/defradb/issues/1495)) +* Make nested joins work correctly from primary direction ([#1491](https://github.com/sourcenetwork/defradb/issues/1491)) +* Add reconnection to known peers ([#1482](https://github.com/sourcenetwork/defradb/issues/1482)) +* Rename commit field input arg to fieldId ([#1460](https://github.com/sourcenetwork/defradb/issues/1460)) +* Reference collectionID in p2p readme ([#1466](https://github.com/sourcenetwork/defradb/issues/1466)) +* Handling SIGTERM in CLI `start` command ([#1459](https://github.com/sourcenetwork/defradb/issues/1459)) +* Update QL documentation link and replicator command ([#1440](https://github.com/sourcenetwork/defradb/issues/1440)) +* Fix typo in readme ([#1419](https://github.com/sourcenetwork/defradb/issues/1419)) +* Limit the size of http request bodies that we handle ([#1405](https://github.com/sourcenetwork/defradb/issues/1405)) +* Improve P2P event handling ([#1388](https://github.com/sourcenetwork/defradb/issues/1388)) +* Serialize DB errors to json in http package ([#1401](https://github.com/sourcenetwork/defradb/issues/1401)) +* Do not commit if errors have been returned ([#1390](https://github.com/sourcenetwork/defradb/issues/1390)) +* Unlock replicator lock before returning error ([#1369](https://github.com/sourcenetwork/defradb/issues/1369)) +* Improve NonNull error message ([#1362](https://github.com/sourcenetwork/defradb/issues/1362)) +* Use ring-buffer for WaitForFoo chans ([#1359](https://github.com/sourcenetwork/defradb/issues/1359)) +* 
Guarantee event processing order ([#1352](https://github.com/sourcenetwork/defradb/issues/1352)) +* Explain of _group with dockeys filter to be []string ([#1348](https://github.com/sourcenetwork/defradb/issues/1348)) + +### Refactoring + +* Use `int32` for proper gql scalar Int parsing ([#1493](https://github.com/sourcenetwork/defradb/issues/1493)) +* Improve rollback on peer P2P collection error ([#1461](https://github.com/sourcenetwork/defradb/issues/1461)) +* Improve CLI with test suite and builder pattern ([#928](https://github.com/sourcenetwork/defradb/issues/928)) + +### Testing + +* Add DB/Node Restart tests ([#1504](https://github.com/sourcenetwork/defradb/issues/1504)) +* Provide tests for client introspection query ([#1492](https://github.com/sourcenetwork/defradb/issues/1492)) +* Convert explain count tests to new explain setup ([#1488](https://github.com/sourcenetwork/defradb/issues/1488)) +* Convert explain sum tests to new explain setup ([#1489](https://github.com/sourcenetwork/defradb/issues/1489)) +* Convert explain average tests to new explain setup ([#1487](https://github.com/sourcenetwork/defradb/issues/1487)) +* Convert explain top-level tests to new explain setup ([#1480](https://github.com/sourcenetwork/defradb/issues/1480)) +* Convert explain order tests to new explain setup ([#1478](https://github.com/sourcenetwork/defradb/issues/1478)) +* Convert explain join tests to new explain setup ([#1476](https://github.com/sourcenetwork/defradb/issues/1476)) +* Convert explain dagscan tests to new explain setup ([#1474](https://github.com/sourcenetwork/defradb/issues/1474)) +* Add tests to assert schema id order independence ([#1456](https://github.com/sourcenetwork/defradb/issues/1456)) +* Capitalize all integration schema types ([#1445](https://github.com/sourcenetwork/defradb/issues/1445)) +* Convert explain limit tests to new explain setup ([#1446](https://github.com/sourcenetwork/defradb/issues/1446)) +* Improve change detector performance 
([#1433](https://github.com/sourcenetwork/defradb/issues/1433)) +* Convert mutation explain tests to new explain setup ([#1416](https://github.com/sourcenetwork/defradb/issues/1416)) +* Convert filter explain tests to new explain setup ([#1380](https://github.com/sourcenetwork/defradb/issues/1380)) +* Retry test doc mutation on transaction conflict ([#1366](https://github.com/sourcenetwork/defradb/issues/1366)) + +### Continuous integration + +* Remove secret ssh key stuff from change detector wf ([#1438](https://github.com/sourcenetwork/defradb/issues/1438)) +* Fix the SSH security issue from AMI scan report ([#1426](https://github.com/sourcenetwork/defradb/issues/1426)) +* Add a separate workflow to run the linter ([#1434](https://github.com/sourcenetwork/defradb/issues/1434)) +* Allow CI to work from forked repo ([#1392](https://github.com/sourcenetwork/defradb/issues/1392)) +* Bump go version within packer for AWS AMI ([#1344](https://github.com/sourcenetwork/defradb/issues/1344)) + +### Chore + +* Enshrine defra logger names ([#1410](https://github.com/sourcenetwork/defradb/issues/1410)) +* Remove some dead code ([#1470](https://github.com/sourcenetwork/defradb/issues/1470)) +* Update graphql-go ([#1422](https://github.com/sourcenetwork/defradb/issues/1422)) +* Improve logging consistency ([#1424](https://github.com/sourcenetwork/defradb/issues/1424)) +* Makefile tests with shorter timeout and common flags ([#1397](https://github.com/sourcenetwork/defradb/issues/1397)) +* Move to gofrs/uuid ([#1396](https://github.com/sourcenetwork/defradb/issues/1396)) +* Move to ipfs boxo ([#1393](https://github.com/sourcenetwork/defradb/issues/1393)) +* Document collection.txn ([#1363](https://github.com/sourcenetwork/defradb/issues/1363)) + +### Bot + +* Bump golang.org/x/crypto from 0.8.0 to 0.9.0 ([#1497](https://github.com/sourcenetwork/defradb/issues/1497)) +* Bump golang.org/x/net from 0.9.0 to 0.10.0 ([#1496](https://github.com/sourcenetwork/defradb/issues/1496)) +* 
Bump google.golang.org/grpc from 1.54.0 to 1.55.0 ([#1464](https://github.com/sourcenetwork/defradb/issues/1464)) +* Bump github.com/ipfs/boxo from 0.8.0 to 0.8.1 ([#1427](https://github.com/sourcenetwork/defradb/issues/1427)) +* Bump golang.org/x/crypto from 0.7.0 to 0.8.0 ([#1398](https://github.com/sourcenetwork/defradb/issues/1398)) +* Bump github.com/spf13/cobra from 1.6.1 to 1.7.0 ([#1399](https://github.com/sourcenetwork/defradb/issues/1399)) +* Bump github.com/ipfs/go-blockservice from 0.5.0 to 0.5.1 ([#1300](https://github.com/sourcenetwork/defradb/issues/1300)) +* Bump github.com/ipfs/go-cid from 0.4.0 to 0.4.1 ([#1301](https://github.com/sourcenetwork/defradb/issues/1301)) + + +## [v0.5.0](https://github.com/sourcenetwork/defradb/compare/v0.4.0...v0.5.0) + +> 2023-04-12 + +DefraDB v0.5 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are many new features in this release, but most importantly, this is the first open source release for DefraDB. As such, this release focused on various quality of life changes and refactors, bug fixes, and overall cleanliness of the repo so it can effectively be used and tested in the public domain. + +To get a full outline of the changes, we invite you to review the official changelog below. Some highlights are the first iteration of our schema update system, allowing developers to add new fields to schemas using our JSON Patch based DDL, a new DAG based delete system which will persist "soft-delete" ops into the CRDT Merkle DAG, and an early prototype for our collection level peer-to-peer synchronization. + +This release does include a Breaking Change to existing v0.4.x databases. 
If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Add document delete mechanics ([#1263](https://github.com/sourcenetwork/defradb/issues/1263)) +* Ability to explain an executed request ([#1188](https://github.com/sourcenetwork/defradb/issues/1188)) +* Add SchemaPatch CLI command ([#1250](https://github.com/sourcenetwork/defradb/issues/1250)) +* Add support for one-one mutation from sec. side ([#1247](https://github.com/sourcenetwork/defradb/issues/1247)) +* Store only key in DAG instead of dockey path ([#1245](https://github.com/sourcenetwork/defradb/issues/1245)) +* Add collectionId field to commit field ([#1235](https://github.com/sourcenetwork/defradb/issues/1235)) +* Add field kind substitution for PatchSchema ([#1223](https://github.com/sourcenetwork/defradb/issues/1223)) +* Add dockey field for commit field ([#1216](https://github.com/sourcenetwork/defradb/issues/1216)) +* Allow new fields to be added locally to schema ([#1139](https://github.com/sourcenetwork/defradb/issues/1139)) +* Add `like` sub-string filter ([#1091](https://github.com/sourcenetwork/defradb/issues/1091)) +* Add ability for P2P to wait for pushlog by peer ([#1098](https://github.com/sourcenetwork/defradb/issues/1098)) +* Add P2P collection topic subscription ([#1086](https://github.com/sourcenetwork/defradb/issues/1086)) +* Add support for schema version id in queries ([#1067](https://github.com/sourcenetwork/defradb/issues/1067)) +* Add schema version id to commit queries ([#1061](https://github.com/sourcenetwork/defradb/issues/1061)) +* Persist schema version at time of commit ([#1055](https://github.com/sourcenetwork/defradb/issues/1055)) +* Add ability to input simple explain type arg ([#1039](https://github.com/sourcenetwork/defradb/issues/1039)) + +### Fixes + +* API address parameter validation 
([#1311](https://github.com/sourcenetwork/defradb/issues/1311)) +* Improve error message for NonNull GQL types ([#1333](https://github.com/sourcenetwork/defradb/issues/1333)) +* Handle panics in the rpc server ([#1330](https://github.com/sourcenetwork/defradb/issues/1330)) +* Handle returned error in select.go ([#1329](https://github.com/sourcenetwork/defradb/issues/1329)) +* Resolve handful of CLI issues ([#1318](https://github.com/sourcenetwork/defradb/issues/1318)) +* Only check for events queue on subscription request ([#1326](https://github.com/sourcenetwork/defradb/issues/1326)) +* Remove client Create/UpdateCollection ([#1309](https://github.com/sourcenetwork/defradb/issues/1309)) +* CLI to display specific command usage help ([#1314](https://github.com/sourcenetwork/defradb/issues/1314)) +* Fix P2P collection CLI commands ([#1295](https://github.com/sourcenetwork/defradb/issues/1295)) +* Dont double up badger file path ([#1299](https://github.com/sourcenetwork/defradb/issues/1299)) +* Update immutable package ([#1290](https://github.com/sourcenetwork/defradb/issues/1290)) +* Fix panic on success of Add/RemoveP2PCollections ([#1297](https://github.com/sourcenetwork/defradb/issues/1297)) +* Fix deadlock on memory-datastore Close ([#1273](https://github.com/sourcenetwork/defradb/issues/1273)) +* Determine if query is introspection query ([#1255](https://github.com/sourcenetwork/defradb/issues/1255)) +* Allow newly added fields to sync via p2p ([#1226](https://github.com/sourcenetwork/defradb/issues/1226)) +* Expose `ExplainEnum` in the GQL schema ([#1204](https://github.com/sourcenetwork/defradb/issues/1204)) +* Resolve aggregates' mapping with deep nested subtypes ([#1175](https://github.com/sourcenetwork/defradb/issues/1175)) +* Make sort stable and handle nil comparison ([#1094](https://github.com/sourcenetwork/defradb/issues/1094)) +* Change successful schema add status to 200 ([#1106](https://github.com/sourcenetwork/defradb/issues/1106)) +* Add delay in 
P2P test util execution ([#1093](https://github.com/sourcenetwork/defradb/issues/1093)) +* Ensure errors test don't hard expect folder name ([#1072](https://github.com/sourcenetwork/defradb/issues/1072)) +* Remove potential P2P deadlock ([#1056](https://github.com/sourcenetwork/defradb/issues/1056)) +* Rework the P2P integration tests ([#989](https://github.com/sourcenetwork/defradb/issues/989)) +* Improve DAG sync with highly concurrent updates ([#1031](https://github.com/sourcenetwork/defradb/issues/1031)) + +### Documentation + +* Update docs for the v0.5 release ([#1320](https://github.com/sourcenetwork/defradb/issues/1320)) +* Document client interfaces in client/db.go ([#1305](https://github.com/sourcenetwork/defradb/issues/1305)) +* Document client Description types ([#1307](https://github.com/sourcenetwork/defradb/issues/1307)) +* Improve security policy ([#1240](https://github.com/sourcenetwork/defradb/issues/1240)) +* Add security disclosure policy ([#1194](https://github.com/sourcenetwork/defradb/issues/1194)) +* Correct commits query example in readme ([#1172](https://github.com/sourcenetwork/defradb/issues/1172)) + +### Refactoring + +* Improve p2p collection operations on peer ([#1286](https://github.com/sourcenetwork/defradb/issues/1286)) +* Migrate gql introspection tests to new framework ([#1211](https://github.com/sourcenetwork/defradb/issues/1211)) +* Reorganise client transaction related interfaces ([#1180](https://github.com/sourcenetwork/defradb/issues/1180)) +* Config-local viper, rootdir, and logger parsing ([#1132](https://github.com/sourcenetwork/defradb/issues/1132)) +* Migrate mutation-relation tests to new framework ([#1109](https://github.com/sourcenetwork/defradb/issues/1109)) +* Rework integration test framework ([#1089](https://github.com/sourcenetwork/defradb/issues/1089)) +* Generate gql types using col. 
desc ([#1080](https://github.com/sourcenetwork/defradb/issues/1080)) +* Extract config errors to dedicated file ([#1107](https://github.com/sourcenetwork/defradb/issues/1107)) +* Change terminology from query to request ([#1054](https://github.com/sourcenetwork/defradb/issues/1054)) +* Allow db keys to handle multiple schema versions ([#1026](https://github.com/sourcenetwork/defradb/issues/1026)) +* Extract query schema errors to dedicated file ([#1037](https://github.com/sourcenetwork/defradb/issues/1037)) +* Extract planner errors to dedicated file ([#1034](https://github.com/sourcenetwork/defradb/issues/1034)) +* Extract query parser errors to dedicated file ([#1035](https://github.com/sourcenetwork/defradb/issues/1035)) + +### Testing + +* Remove test reference to DEFRA_ROOTDIR env var ([#1328](https://github.com/sourcenetwork/defradb/issues/1328)) +* Expand tests for Peer subscribe actions ([#1287](https://github.com/sourcenetwork/defradb/issues/1287)) +* Fix flaky TestCloseThroughContext test ([#1265](https://github.com/sourcenetwork/defradb/issues/1265)) +* Add gql introspection tests for patch schema ([#1219](https://github.com/sourcenetwork/defradb/issues/1219)) +* Explicitly state change detector split for test ([#1228](https://github.com/sourcenetwork/defradb/issues/1228)) +* Add test for successful one-one create mutation ([#1215](https://github.com/sourcenetwork/defradb/issues/1215)) +* Ensure that all databases are always closed on exit ([#1187](https://github.com/sourcenetwork/defradb/issues/1187)) +* Add P2P tests for Schema Update adding field ([#1182](https://github.com/sourcenetwork/defradb/issues/1182)) +* Migrate P2P/state tests to new framework ([#1160](https://github.com/sourcenetwork/defradb/issues/1160)) +* Remove sleep from subscription tests ([#1156](https://github.com/sourcenetwork/defradb/issues/1156)) +* Fetch documents on test execution start ([#1163](https://github.com/sourcenetwork/defradb/issues/1163)) +* Introduce basic testing 
for the `version` module ([#1111](https://github.com/sourcenetwork/defradb/issues/1111)) +* Boost test coverage for collection_update ([#1050](https://github.com/sourcenetwork/defradb/issues/1050)) +* Wait between P2P update retry attempts ([#1052](https://github.com/sourcenetwork/defradb/issues/1052)) +* Exclude auto-generated protobuf files from codecov ([#1048](https://github.com/sourcenetwork/defradb/issues/1048)) +* Add P2P tests for relational docs ([#1042](https://github.com/sourcenetwork/defradb/issues/1042)) + +### Continuous integration + +* Add workflow that builds DefraDB AMI upon tag push ([#1304](https://github.com/sourcenetwork/defradb/issues/1304)) +* Allow PR title to end with a capital letter ([#1291](https://github.com/sourcenetwork/defradb/issues/1291)) +* Changes for `dependabot` to be well-behaved ([#1165](https://github.com/sourcenetwork/defradb/issues/1165)) +* Skip benchmarks for dependabot ([#1144](https://github.com/sourcenetwork/defradb/issues/1144)) +* Add workflow to ensure deps build properly ([#1078](https://github.com/sourcenetwork/defradb/issues/1078)) +* Runner and Builder Containerfiles ([#951](https://github.com/sourcenetwork/defradb/issues/951)) +* Fix go-header linter rule to be any year ([#1021](https://github.com/sourcenetwork/defradb/issues/1021)) + +### Chore + +* Add Islam as contributor ([#1302](https://github.com/sourcenetwork/defradb/issues/1302)) +* Update go-libp2p to 0.26.4 ([#1257](https://github.com/sourcenetwork/defradb/issues/1257)) +* Improve the test coverage of datastore ([#1203](https://github.com/sourcenetwork/defradb/issues/1203)) +* Add issue and discussion templates ([#1193](https://github.com/sourcenetwork/defradb/issues/1193)) +* Bump libp2p/go-libp2p-kad-dht from 0.21.0 to 0.21.1 ([#1146](https://github.com/sourcenetwork/defradb/issues/1146)) +* Enable dependabot ([#1120](https://github.com/sourcenetwork/defradb/issues/1120)) +* Update `opentelemetry` dependencies 
([#1114](https://github.com/sourcenetwork/defradb/issues/1114)) +* Update dependencies including go-ipfs ([#1112](https://github.com/sourcenetwork/defradb/issues/1112)) +* Bump to GoLang v1.19 ([#818](https://github.com/sourcenetwork/defradb/issues/818)) +* Remove versionedScan node ([#1049](https://github.com/sourcenetwork/defradb/issues/1049)) + +### Bot + +* Bump github.com/multiformats/go-multiaddr from 0.8.0 to 0.9.0 ([#1277](https://github.com/sourcenetwork/defradb/issues/1277)) +* Bump google.golang.org/grpc from 1.53.0 to 1.54.0 ([#1233](https://github.com/sourcenetwork/defradb/issues/1233)) +* Bump github.com/multiformats/go-multibase from 0.1.1 to 0.2.0 ([#1230](https://github.com/sourcenetwork/defradb/issues/1230)) +* Bump github.com/ipfs/go-libipfs from 0.6.2 to 0.7.0 ([#1231](https://github.com/sourcenetwork/defradb/issues/1231)) +* Bump github.com/ipfs/go-cid from 0.3.2 to 0.4.0 ([#1200](https://github.com/sourcenetwork/defradb/issues/1200)) +* Bump github.com/ipfs/go-ipfs-blockstore from 1.2.0 to 1.3.0 ([#1199](https://github.com/sourcenetwork/defradb/issues/1199)) +* Bump github.com/stretchr/testify from 1.8.1 to 1.8.2 ([#1198](https://github.com/sourcenetwork/defradb/issues/1198)) +* Bump github.com/ipfs/go-libipfs from 0.6.1 to 0.6.2 ([#1201](https://github.com/sourcenetwork/defradb/issues/1201)) +* Bump golang.org/x/crypto from 0.6.0 to 0.7.0 ([#1197](https://github.com/sourcenetwork/defradb/issues/1197)) +* Bump libp2p/go-libp2p-gostream from 0.5.0 to 0.6.0 ([#1152](https://github.com/sourcenetwork/defradb/issues/1152)) +* Bump github.com/ipfs/go-libipfs from 0.5.0 to 0.6.1 ([#1166](https://github.com/sourcenetwork/defradb/issues/1166)) +* Bump github.com/ugorji/go/codec from 1.2.9 to 1.2.11 ([#1173](https://github.com/sourcenetwork/defradb/issues/1173)) +* Bump github.com/libp2p/go-libp2p-pubsub from 0.9.0 to 0.9.3 ([#1183](https://github.com/sourcenetwork/defradb/issues/1183)) + + +## 
[v0.4.0](https://github.com/sourcenetwork/defradb/compare/v0.3.1...v0.4.0) + +> 2022-12-23 + +DefraDB v0.4 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are various new features in this release - some of which are breaking - and we invite you to review the official changelog below. Some highlights are persistence of replicators, DateTime scalars, TLS support, and GQL subscriptions. + +This release does include a Breaking Change to existing v0.3.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Add basic metric functionality ([#971](https://github.com/sourcenetwork/defradb/issues/971)) +* Add thread safe transactional in-memory datastore ([#947](https://github.com/sourcenetwork/defradb/issues/947)) +* Persist p2p replicators ([#960](https://github.com/sourcenetwork/defradb/issues/960)) +* Add DateTime custom scalars ([#931](https://github.com/sourcenetwork/defradb/issues/931)) +* Add GraphQL subscriptions ([#934](https://github.com/sourcenetwork/defradb/issues/934)) +* Add support for tls ([#885](https://github.com/sourcenetwork/defradb/issues/885)) +* Add group by support for commits ([#887](https://github.com/sourcenetwork/defradb/issues/887)) +* Add depth support for commits ([#889](https://github.com/sourcenetwork/defradb/issues/889)) +* Make dockey optional for allCommits queries ([#847](https://github.com/sourcenetwork/defradb/issues/847)) +* Add WithStack to the errors package ([#870](https://github.com/sourcenetwork/defradb/issues/870)) +* Add event system ([#834](https://github.com/sourcenetwork/defradb/issues/834)) + +### Fixes + +* Correct errors.WithStack behaviour 
([#984](https://github.com/sourcenetwork/defradb/issues/984)) +* Correctly handle nested one to one joins ([#964](https://github.com/sourcenetwork/defradb/issues/964)) +* Do not assume parent record exists when joining ([#963](https://github.com/sourcenetwork/defradb/issues/963)) +* Change time format for HTTP API log ([#910](https://github.com/sourcenetwork/defradb/issues/910)) +* Error if group select contains non-group-by fields ([#898](https://github.com/sourcenetwork/defradb/issues/898)) +* Add inspection of values for ENV flags ([#900](https://github.com/sourcenetwork/defradb/issues/900)) +* Remove panics from document ([#881](https://github.com/sourcenetwork/defradb/issues/881)) +* Add __typename support ([#871](https://github.com/sourcenetwork/defradb/issues/871)) +* Handle subscriber close ([#877](https://github.com/sourcenetwork/defradb/issues/877)) +* Publish update events post commit ([#866](https://github.com/sourcenetwork/defradb/issues/866)) + +### Refactoring + +* Make rootstore require Batching and TxnDatastore ([#940](https://github.com/sourcenetwork/defradb/issues/940)) +* Conceptually clarify schema vs query-language ([#924](https://github.com/sourcenetwork/defradb/issues/924)) +* Decouple db.db from gql ([#912](https://github.com/sourcenetwork/defradb/issues/912)) +* Merkle clock heads cleanup ([#918](https://github.com/sourcenetwork/defradb/issues/918)) +* Simplify dag fetcher ([#913](https://github.com/sourcenetwork/defradb/issues/913)) +* Cleanup parsing logic ([#909](https://github.com/sourcenetwork/defradb/issues/909)) +* Move planner outside the gql directory ([#907](https://github.com/sourcenetwork/defradb/issues/907)) +* Refactor commit nodes ([#892](https://github.com/sourcenetwork/defradb/issues/892)) +* Make latest commits syntax sugar ([#890](https://github.com/sourcenetwork/defradb/issues/890)) +* Remove commit query ([#841](https://github.com/sourcenetwork/defradb/issues/841)) + +### Testing + +* Add event tests 
([#965](https://github.com/sourcenetwork/defradb/issues/965)) +* Add new setup for testing explain functionality ([#949](https://github.com/sourcenetwork/defradb/issues/949)) +* Add txn relation-type delete and create tests ([#875](https://github.com/sourcenetwork/defradb/issues/875)) +* Skip change detection for tests that assert panic ([#883](https://github.com/sourcenetwork/defradb/issues/883)) + +### Continuous integration + +* Bump all gh-action versions to support node16 ([#990](https://github.com/sourcenetwork/defradb/issues/990)) +* Bump ssh-agent action to v0.7.0 ([#978](https://github.com/sourcenetwork/defradb/issues/978)) +* Add error message format check ([#901](https://github.com/sourcenetwork/defradb/issues/901)) + +### Chore + +* Extract (events, merkle) errors to errors.go ([#973](https://github.com/sourcenetwork/defradb/issues/973)) +* Extract (datastore, db) errors to errors.go ([#969](https://github.com/sourcenetwork/defradb/issues/969)) +* Extract (connor, crdt, core) errors to errors.go ([#968](https://github.com/sourcenetwork/defradb/issues/968)) +* Extract inline (http and client) errors to errors.go ([#967](https://github.com/sourcenetwork/defradb/issues/967)) +* Update badger version ([#966](https://github.com/sourcenetwork/defradb/issues/966)) +* Move Option and Enumerable to immutables ([#939](https://github.com/sourcenetwork/defradb/issues/939)) +* Add configuration of external loggers ([#942](https://github.com/sourcenetwork/defradb/issues/942)) +* Strip DSKey prefixes and simplify NewDataStoreKey ([#944](https://github.com/sourcenetwork/defradb/issues/944)) +* Include version metadata in cross-building ([#930](https://github.com/sourcenetwork/defradb/issues/930)) +* Update to v0.23.2 the libP2P package ([#908](https://github.com/sourcenetwork/defradb/issues/908)) +* Remove `ipfslite` dependency ([#739](https://github.com/sourcenetwork/defradb/issues/739)) + + + +## 
[v0.3.1](https://github.com/sourcenetwork/defradb/compare/v0.3.0...v0.3.1) + +> 2022-09-23 + +DefraDB v0.3.1 is a minor release, primarily focusing on additional/extended features and fixes of items added in the `v0.3.0` release. + +### Features + +* Add cid support for allCommits ([#857](https://github.com/sourcenetwork/defradb/issues/857)) +* Add offset support to allCommits ([#859](https://github.com/sourcenetwork/defradb/issues/859)) +* Add limit support to allCommits query ([#856](https://github.com/sourcenetwork/defradb/issues/856)) +* Add order support to allCommits ([#845](https://github.com/sourcenetwork/defradb/issues/845)) +* Display CLI usage on user error ([#819](https://github.com/sourcenetwork/defradb/issues/819)) +* Add support for dockey filters in child joins ([#806](https://github.com/sourcenetwork/defradb/issues/806)) +* Add sort support for numeric aggregates ([#786](https://github.com/sourcenetwork/defradb/issues/786)) +* Allow filtering by nil ([#789](https://github.com/sourcenetwork/defradb/issues/789)) +* Add aggregate offset support ([#778](https://github.com/sourcenetwork/defradb/issues/778)) +* Remove filter depth limit ([#777](https://github.com/sourcenetwork/defradb/issues/777)) +* Add support for and-or inline array aggregate filters ([#779](https://github.com/sourcenetwork/defradb/issues/779)) +* Add limit support for aggregates ([#771](https://github.com/sourcenetwork/defradb/issues/771)) +* Add support for inline arrays of nillable types ([#759](https://github.com/sourcenetwork/defradb/issues/759)) +* Create errors package ([#548](https://github.com/sourcenetwork/defradb/issues/548)) +* Add ability to display peer id ([#719](https://github.com/sourcenetwork/defradb/issues/719)) +* Add a config option to set the vlog max file size ([#743](https://github.com/sourcenetwork/defradb/issues/743)) +* Explain `topLevelNode` like a `MultiNode` plan ([#749](https://github.com/sourcenetwork/defradb/issues/749)) +* Make `topLevelNode` 
explainable ([#737](https://github.com/sourcenetwork/defradb/issues/737)) + +### Fixes + +* Order subtype without selecting the join child ([#810](https://github.com/sourcenetwork/defradb/issues/810)) +* Correctly handles nil one-one joins ([#837](https://github.com/sourcenetwork/defradb/issues/837)) +* Reset scan node for each join ([#828](https://github.com/sourcenetwork/defradb/issues/828)) +* Handle filter input field argument being nil ([#787](https://github.com/sourcenetwork/defradb/issues/787)) +* Ensure CLI outputs JSON to stdout when directed to pipe ([#804](https://github.com/sourcenetwork/defradb/issues/804)) +* Error if given the wrong side of a one-one relationship ([#795](https://github.com/sourcenetwork/defradb/issues/795)) +* Add object marker to enable return of empty docs ([#800](https://github.com/sourcenetwork/defradb/issues/800)) +* Resolve the extra `typeIndexJoin`s for `_avg` aggregate ([#774](https://github.com/sourcenetwork/defradb/issues/774)) +* Remove _like filter operator ([#797](https://github.com/sourcenetwork/defradb/issues/797)) +* Remove having gql types ([#785](https://github.com/sourcenetwork/defradb/issues/785)) +* Error if child _group selected without parent groupBy ([#781](https://github.com/sourcenetwork/defradb/issues/781)) +* Error nicely on missing field specifier ([#782](https://github.com/sourcenetwork/defradb/issues/782)) +* Handle order input field argument being nil ([#701](https://github.com/sourcenetwork/defradb/issues/701)) +* Change output to outputpath in config file template for logger ([#716](https://github.com/sourcenetwork/defradb/issues/716)) +* Delete mutations not correct persisting all keys ([#731](https://github.com/sourcenetwork/defradb/issues/731)) + +### Tooling + +* Ban the usage of `ioutil` package ([#747](https://github.com/sourcenetwork/defradb/issues/747)) +* Migrate from CircleCi to GitHub Actions ([#679](https://github.com/sourcenetwork/defradb/issues/679)) + +### Documentation + +* Clarify 
meaning of url param, update in-repo CLI docs ([#814](https://github.com/sourcenetwork/defradb/issues/814)) +* Disclaimer of exposed to network and not encrypted ([#793](https://github.com/sourcenetwork/defradb/issues/793)) +* Update logo to respect theme ([#728](https://github.com/sourcenetwork/defradb/issues/728)) + +### Refactoring + +* Replace all `interface{}` with `any` alias ([#805](https://github.com/sourcenetwork/defradb/issues/805)) +* Use fastjson to parse mutation data string ([#772](https://github.com/sourcenetwork/defradb/issues/772)) +* Rework limit node flow ([#767](https://github.com/sourcenetwork/defradb/issues/767)) +* Make Option immutable ([#769](https://github.com/sourcenetwork/defradb/issues/769)) +* Rework sum and count nodes to make use of generics ([#757](https://github.com/sourcenetwork/defradb/issues/757)) +* Remove some possible panics from codebase ([#732](https://github.com/sourcenetwork/defradb/issues/732)) +* Change logging calls to use feedback in CLI package ([#714](https://github.com/sourcenetwork/defradb/issues/714)) + +### Testing + +* Add tests for aggs with nil filters ([#813](https://github.com/sourcenetwork/defradb/issues/813)) +* Add not equals filter tests ([#798](https://github.com/sourcenetwork/defradb/issues/798)) +* Fix `cli/peerid_test` to not clash addresses ([#766](https://github.com/sourcenetwork/defradb/issues/766)) +* Add change detector summary to test readme ([#754](https://github.com/sourcenetwork/defradb/issues/754)) +* Add tests for inline array grouping ([#752](https://github.com/sourcenetwork/defradb/issues/752)) + +### Continuous integration + +* Reduce test resource usage and test with file db ([#791](https://github.com/sourcenetwork/defradb/issues/791)) +* Add makefile target to verify the local module cache ([#775](https://github.com/sourcenetwork/defradb/issues/775)) +* Allow PR titles to end with a number ([#745](https://github.com/sourcenetwork/defradb/issues/745)) +* Add a workflow to validate 
pull request titles ([#734](https://github.com/sourcenetwork/defradb/issues/734)) +* Fix the linter version to `v1.47` ([#726](https://github.com/sourcenetwork/defradb/issues/726)) + +### Chore + +* Remove file system paths from resulting executable ([#831](https://github.com/sourcenetwork/defradb/issues/831)) +* Add goimports linter for consistent imports ordering ([#816](https://github.com/sourcenetwork/defradb/issues/816)) +* Improve UX by providing more information ([#802](https://github.com/sourcenetwork/defradb/issues/802)) +* Change to defra errors and handle errors stacktrace ([#794](https://github.com/sourcenetwork/defradb/issues/794)) +* Clean up `go.mod` with pruned module graphs ([#756](https://github.com/sourcenetwork/defradb/issues/756)) +* Update to v0.20.3 of libp2p ([#740](https://github.com/sourcenetwork/defradb/issues/740)) +* Bump to GoLang `v1.18` ([#721](https://github.com/sourcenetwork/defradb/issues/721)) + + + +## [v0.3.0](https://github.com/sourcenetwork/defradb/compare/v0.2.1...v0.3.0) + +> 2022-08-02 + +DefraDB v0.3 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +There are *several* new features in this release, and we invite you to review the official changelog below. Some highlights are various new features for Grouping & Aggregation for the query system, like top-level aggregation and group filtering. Moreover, a brand new Query Explain system was added to introspect the execution plans created by DefraDB. Lastly we introduced a revamped CLI configuration system. + +This release does include a Breaking Change to existing v0.2.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. 
+ +### Features + +* Add named config overrides ([#659](https://github.com/sourcenetwork/defradb/issues/659)) +* Expose color and caller log options, add validation ([#652](https://github.com/sourcenetwork/defradb/issues/652)) +* Add ability to explain `groupNode` and its attribute(s). ([#641](https://github.com/sourcenetwork/defradb/issues/641)) +* Add primary directive for schema definitions ([@primary](https://github.com/primary)) ([#650](https://github.com/sourcenetwork/defradb/issues/650)) +* Add support for aggregate filters on inline arrays ([#622](https://github.com/sourcenetwork/defradb/issues/622)) +* Add explainable renderLimitNode & hardLimitNode attributes. ([#614](https://github.com/sourcenetwork/defradb/issues/614)) +* Add support for top level aggregates ([#594](https://github.com/sourcenetwork/defradb/issues/594)) +* Update `countNode` explanation to be consistent. ([#600](https://github.com/sourcenetwork/defradb/issues/600)) +* Add support for stdin as input in CLI ([#608](https://github.com/sourcenetwork/defradb/issues/608)) +* Explain `cid` & `field` attributes for `dagScanNode` ([#598](https://github.com/sourcenetwork/defradb/issues/598)) +* Add ability to explain `dagScanNode` attribute(s). ([#560](https://github.com/sourcenetwork/defradb/issues/560)) +* Add the ability to send user feedback to the console even when logging to file. ([#568](https://github.com/sourcenetwork/defradb/issues/568)) +* Add ability to explain `sortNode` attribute(s). ([#558](https://github.com/sourcenetwork/defradb/issues/558)) +* Add ability to explain `sumNode` attribute(s). ([#559](https://github.com/sourcenetwork/defradb/issues/559)) +* Introduce top-level config package ([#389](https://github.com/sourcenetwork/defradb/issues/389)) +* Add ability to explain `updateNode` attributes. ([#514](https://github.com/sourcenetwork/defradb/issues/514)) +* Add `typeIndexJoin` explainable attributes. 
([#499](https://github.com/sourcenetwork/defradb/issues/499)) +* Add support to explain `countNode` attributes. ([#504](https://github.com/sourcenetwork/defradb/issues/504)) +* Add CORS capability to HTTP API ([#467](https://github.com/sourcenetwork/defradb/issues/467)) +* Add explanation of spans for `scanNode`. ([#492](https://github.com/sourcenetwork/defradb/issues/492)) +* Add ability to Explain the response plan. ([#385](https://github.com/sourcenetwork/defradb/issues/385)) +* Add aggregate filter support for groups only ([#426](https://github.com/sourcenetwork/defradb/issues/426)) +* Configurable caller option in logger ([#416](https://github.com/sourcenetwork/defradb/issues/416)) +* Add Average aggregate support ([#383](https://github.com/sourcenetwork/defradb/issues/383)) +* Allow summation of aggregates ([#341](https://github.com/sourcenetwork/defradb/issues/341)) +* Add ability to check DefraDB CLI version. ([#339](https://github.com/sourcenetwork/defradb/issues/339)) + +### Fixes + +* Add a check to ensure limit is not 0 when evaluating query limit and offset ([#706](https://github.com/sourcenetwork/defradb/issues/706)) +* Support multiple `--logger` flags ([#704](https://github.com/sourcenetwork/defradb/issues/704)) +* Return without an error if relation is finalized ([#698](https://github.com/sourcenetwork/defradb/issues/698)) +* Logger not correctly applying named config ([#696](https://github.com/sourcenetwork/defradb/issues/696)) +* Add content-type media type parsing ([#678](https://github.com/sourcenetwork/defradb/issues/678)) +* Remove portSyncLock deadlock condition ([#671](https://github.com/sourcenetwork/defradb/issues/671)) +* Silence cobra default errors and usage printing ([#668](https://github.com/sourcenetwork/defradb/issues/668)) +* Add stdout validation when setting logging output path ([#666](https://github.com/sourcenetwork/defradb/issues/666)) +* Consider `--logoutput` CLI flag properly 
([#645](https://github.com/sourcenetwork/defradb/issues/645)) +* Handle errors and responses in CLI `client` commands ([#579](https://github.com/sourcenetwork/defradb/issues/579)) +* Rename aggregate gql types ([#638](https://github.com/sourcenetwork/defradb/issues/638)) +* Error when attempting to insert value into relationship field ([#632](https://github.com/sourcenetwork/defradb/issues/632)) +* Allow adding of new schema to database ([#635](https://github.com/sourcenetwork/defradb/issues/635)) +* Correctly parse dockey in broadcast log event. ([#631](https://github.com/sourcenetwork/defradb/issues/631)) +* Increase system's open files limit in integration tests ([#627](https://github.com/sourcenetwork/defradb/issues/627)) +* Avoid populating `order.ordering` with empties. ([#618](https://github.com/sourcenetwork/defradb/issues/618)) +* Change to supporting of non-null inline arrays ([#609](https://github.com/sourcenetwork/defradb/issues/609)) +* Assert fields exist in collection before saving to them ([#604](https://github.com/sourcenetwork/defradb/issues/604)) +* CLI `init` command to reinitialize only config file ([#603](https://github.com/sourcenetwork/defradb/issues/603)) +* Add config and registry clearing to TestLogWritesMessagesToFeedbackLog ([#596](https://github.com/sourcenetwork/defradb/issues/596)) +* Change `$eq` to `_eq` in the failing test. ([#576](https://github.com/sourcenetwork/defradb/issues/576)) +* Resolve failing HTTP API tests via cleanup ([#557](https://github.com/sourcenetwork/defradb/issues/557)) +* Ensure Makefile compatibility with macOS ([#527](https://github.com/sourcenetwork/defradb/issues/527)) +* Separate out iotas in their own blocks. 
([#464](https://github.com/sourcenetwork/defradb/issues/464)) +* Use x/cases for titling instead of strings to handle deprecation ([#457](https://github.com/sourcenetwork/defradb/issues/457)) +* Handle limit and offset in sub groups ([#440](https://github.com/sourcenetwork/defradb/issues/440)) +* Issue preventing DB from restarting with no records ([#437](https://github.com/sourcenetwork/defradb/issues/437)) +* log serving HTTP API before goroutine blocks ([#358](https://github.com/sourcenetwork/defradb/issues/358)) + +### Testing + +* Add integration testing for P2P. ([#655](https://github.com/sourcenetwork/defradb/issues/655)) +* Fix formatting of tests with no extra brackets ([#643](https://github.com/sourcenetwork/defradb/issues/643)) +* Add tests for `averageNode` explain. ([#639](https://github.com/sourcenetwork/defradb/issues/639)) +* Add schema integration tests ([#628](https://github.com/sourcenetwork/defradb/issues/628)) +* Add tests for default properties ([#611](https://github.com/sourcenetwork/defradb/issues/611)) +* Specify which collection to update in test framework ([#601](https://github.com/sourcenetwork/defradb/issues/601)) +* Add tests for grouping by undefined value ([#543](https://github.com/sourcenetwork/defradb/issues/543)) +* Add test for querying undefined field ([#544](https://github.com/sourcenetwork/defradb/issues/544)) +* Expand commit query tests ([#541](https://github.com/sourcenetwork/defradb/issues/541)) +* Add cid (time-travel) query tests ([#539](https://github.com/sourcenetwork/defradb/issues/539)) +* Restructure and expand filter tests ([#512](https://github.com/sourcenetwork/defradb/issues/512)) +* Basic unit testing of `node` package ([#503](https://github.com/sourcenetwork/defradb/issues/503)) +* Test filter in filter tests ([#473](https://github.com/sourcenetwork/defradb/issues/473)) +* Add test for deletion of records in a relationship ([#329](https://github.com/sourcenetwork/defradb/issues/329)) +* Benchmark transaction 
iteration ([#289](https://github.com/sourcenetwork/defradb/issues/289)) + +### Refactoring + +* Improve CLI error handling and fix small issues ([#649](https://github.com/sourcenetwork/defradb/issues/649)) +* Add top-level `version` package ([#583](https://github.com/sourcenetwork/defradb/issues/583)) +* Remove extra log levels ([#634](https://github.com/sourcenetwork/defradb/issues/634)) +* Change `sortNode` to `orderNode`. ([#591](https://github.com/sourcenetwork/defradb/issues/591)) +* Rework update and delete node to remove secondary planner ([#571](https://github.com/sourcenetwork/defradb/issues/571)) +* Trim imported connor package ([#530](https://github.com/sourcenetwork/defradb/issues/530)) +* Internal doc restructure ([#471](https://github.com/sourcenetwork/defradb/issues/471)) +* Copy-paste connor fork into repo ([#567](https://github.com/sourcenetwork/defradb/issues/567)) +* Add safety to the tests, add ability to catch stderr logs and add output path validation ([#552](https://github.com/sourcenetwork/defradb/issues/552)) +* Change handler functions implementation and response formatting ([#498](https://github.com/sourcenetwork/defradb/issues/498)) +* Improve the HTTP API implementation ([#382](https://github.com/sourcenetwork/defradb/issues/382)) +* Use new logger in net/api ([#420](https://github.com/sourcenetwork/defradb/issues/420)) +* Rename NewCidV1_SHA2_256 to mixedCaps ([#415](https://github.com/sourcenetwork/defradb/issues/415)) +* Remove utils package ([#397](https://github.com/sourcenetwork/defradb/issues/397)) +* Rework planNode Next and Value(s) function ([#374](https://github.com/sourcenetwork/defradb/issues/374)) +* Restructure aggregate query syntax ([#373](https://github.com/sourcenetwork/defradb/issues/373)) +* Remove dead code from client package and document remaining ([#356](https://github.com/sourcenetwork/defradb/issues/356)) +* Restructure datastore keys ([#316](https://github.com/sourcenetwork/defradb/issues/316)) +* Add commits 
lost during github outage ([#303](https://github.com/sourcenetwork/defradb/issues/303)) +* Move public members out of core and base packages ([#295](https://github.com/sourcenetwork/defradb/issues/295)) +* Make db stuff internal/private ([#291](https://github.com/sourcenetwork/defradb/issues/291)) +* Rework client.DB to ensure interface contains only public types ([#277](https://github.com/sourcenetwork/defradb/issues/277)) +* Remove GetPrimaryIndexDocKey from collection interface ([#279](https://github.com/sourcenetwork/defradb/issues/279)) +* Remove DataStoreKey from (public) dockey struct ([#278](https://github.com/sourcenetwork/defradb/issues/278)) +* Renormalize to ensure consistent file line termination. ([#226](https://github.com/sourcenetwork/defradb/issues/226)) +* Strongly typed key refactor ([#17](https://github.com/sourcenetwork/defradb/issues/17)) + +### Documentation + +* Use permanent link to BSL license document ([#692](https://github.com/sourcenetwork/defradb/issues/692)) +* README update v0.3.0 ([#646](https://github.com/sourcenetwork/defradb/issues/646)) +* Improve code documentation ([#533](https://github.com/sourcenetwork/defradb/issues/533)) +* Add CONTRIBUTING.md ([#531](https://github.com/sourcenetwork/defradb/issues/531)) +* Add package level docs for logging lib ([#338](https://github.com/sourcenetwork/defradb/issues/338)) + +### Tooling + +* Include all touched packages in code coverage ([#673](https://github.com/sourcenetwork/defradb/issues/673)) +* Use `gotestsum` over `go test` ([#619](https://github.com/sourcenetwork/defradb/issues/619)) +* Update Github pull request template ([#524](https://github.com/sourcenetwork/defradb/issues/524)) +* Fix the cross-build script ([#460](https://github.com/sourcenetwork/defradb/issues/460)) +* Add test coverage html output ([#466](https://github.com/sourcenetwork/defradb/issues/466)) +* Add linter rule for `goconst`. 
([#398](https://github.com/sourcenetwork/defradb/issues/398)) +* Add github PR template. ([#394](https://github.com/sourcenetwork/defradb/issues/394)) +* Disable auto-fixing linter issues by default ([#429](https://github.com/sourcenetwork/defradb/issues/429)) +* Fix linting of empty `else` code blocks ([#402](https://github.com/sourcenetwork/defradb/issues/402)) +* Add the `gofmt` linter rule. ([#405](https://github.com/sourcenetwork/defradb/issues/405)) +* Cleanup linter config file ([#400](https://github.com/sourcenetwork/defradb/issues/400)) +* Add linter rule for copyright headers ([#360](https://github.com/sourcenetwork/defradb/issues/360)) +* Organize our config files and tooling. ([#336](https://github.com/sourcenetwork/defradb/issues/336)) +* Limit line length to 100 characters (linter check) ([#224](https://github.com/sourcenetwork/defradb/issues/224)) +* Ignore db/tests folder and the bench marks. ([#280](https://github.com/sourcenetwork/defradb/issues/280)) + +### Continuous Integration + +* Fix circleci cache permission errors. ([#371](https://github.com/sourcenetwork/defradb/issues/371)) +* Ban extra elses ([#366](https://github.com/sourcenetwork/defradb/issues/366)) +* Fix change-detection to not fail when new tests are added. ([#333](https://github.com/sourcenetwork/defradb/issues/333)) +* Update golang-ci linter and explicit go-setup to use v1.17 ([#331](https://github.com/sourcenetwork/defradb/issues/331)) +* Comment the benchmarking result comparison to the PR ([#305](https://github.com/sourcenetwork/defradb/issues/305)) +* Add benchmark performance comparisons ([#232](https://github.com/sourcenetwork/defradb/issues/232)) +* Add caching / storing of bench report on default branch ([#290](https://github.com/sourcenetwork/defradb/issues/290)) +* Ensure full-benchmarks are ran on a PR-merge. ([#282](https://github.com/sourcenetwork/defradb/issues/282)) +* Add ability to control benchmarks by PR labels. 
([#267](https://github.com/sourcenetwork/defradb/issues/267)) + +### Chore + +* Update APL to refer to D2 Foundation ([#711](https://github.com/sourcenetwork/defradb/issues/711)) +* Update gitignore to include `cmd` folders ([#617](https://github.com/sourcenetwork/defradb/issues/617)) +* Enable random execution order of tests ([#554](https://github.com/sourcenetwork/defradb/issues/554)) +* Enable linters exportloopref, nolintlint, whitespace ([#535](https://github.com/sourcenetwork/defradb/issues/535)) +* Add utility for generation of man pages ([#493](https://github.com/sourcenetwork/defradb/issues/493)) +* Add Dockerfile ([#517](https://github.com/sourcenetwork/defradb/issues/517)) +* Enable errorlint linter ([#520](https://github.com/sourcenetwork/defradb/issues/520)) +* Binaries in `cmd` folder, examples in `examples` folder ([#501](https://github.com/sourcenetwork/defradb/issues/501)) +* Improve log outputs ([#506](https://github.com/sourcenetwork/defradb/issues/506)) +* Move testing to top-level `tests` folder ([#446](https://github.com/sourcenetwork/defradb/issues/446)) +* Update dependencies ([#450](https://github.com/sourcenetwork/defradb/issues/450)) +* Update go-ipfs-blockstore and ipfs-lite ([#436](https://github.com/sourcenetwork/defradb/issues/436)) +* Update libp2p dependency to v0.19 ([#424](https://github.com/sourcenetwork/defradb/issues/424)) +* Update ioutil package to io / os packages. 
([#376](https://github.com/sourcenetwork/defradb/issues/376)) +* git ignore vscode ([#343](https://github.com/sourcenetwork/defradb/issues/343)) +* Updated README.md contributors section ([#292](https://github.com/sourcenetwork/defradb/issues/292)) +* Update changelog v0.2.1 ([#252](https://github.com/sourcenetwork/defradb/issues/252)) + + + +## [v0.2.1](https://github.com/sourcenetwork/defradb/compare/v0.2.0...v0.2.1) + +> 2022-03-04 + +### Features + +* Add ability to delete multiple documents using filter ([#206](https://github.com/sourcenetwork/defradb/issues/206)) +* Add ability to delete multiple documents, using multiple ids ([#196](https://github.com/sourcenetwork/defradb/issues/196)) + +### Fixes + +* Concurrency control of Document using RWMutex ([#213](https://github.com/sourcenetwork/defradb/issues/213)) +* Only log errors and above when benchmarking ([#261](https://github.com/sourcenetwork/defradb/issues/261)) +* Handle proper type conversion on sort nodes ([#228](https://github.com/sourcenetwork/defradb/issues/228)) +* Return empty array if no values found ([#223](https://github.com/sourcenetwork/defradb/issues/223)) +* Close fetcher on error ([#210](https://github.com/sourcenetwork/defradb/issues/210)) +* Installing binary using defradb name ([#190](https://github.com/sourcenetwork/defradb/issues/190)) + +### Tooling + +* Add short benchmark runner option ([#263](https://github.com/sourcenetwork/defradb/issues/263)) + +### Documentation + +* Add data format changes documentation folder ([#89](https://github.com/sourcenetwork/defradb/issues/89)) +* Correcting typos ([#143](https://github.com/sourcenetwork/defradb/issues/143)) +* Update generated CLI docs ([#208](https://github.com/sourcenetwork/defradb/issues/208)) +* Updated readme with P2P section ([#220](https://github.com/sourcenetwork/defradb/issues/220)) +* Update old or missing license headers ([#205](https://github.com/sourcenetwork/defradb/issues/205)) +* Update git-chglog config and template 
([#195](https://github.com/sourcenetwork/defradb/issues/195)) + +### Refactoring + +* Introduction of logging system ([#67](https://github.com/sourcenetwork/defradb/issues/67)) +* Restructure db/txn/multistore structures ([#199](https://github.com/sourcenetwork/defradb/issues/199)) +* Initialize database in constructor ([#211](https://github.com/sourcenetwork/defradb/issues/211)) +* Purge all println and ban it ([#253](https://github.com/sourcenetwork/defradb/issues/253)) + +### Testing + +* Detect and force breaking filesystem changes to be documented ([#89](https://github.com/sourcenetwork/defradb/issues/89)) +* Boost collection test coverage ([#183](https://github.com/sourcenetwork/defradb/issues/183)) + +### Continuous integration + +* Combine the Lint and Benchmark workflows so that the benchmark job depends on the lint job in one workflow ([#209](https://github.com/sourcenetwork/defradb/issues/209)) +* Add rule to only run benchmark if other check are successful ([#194](https://github.com/sourcenetwork/defradb/issues/194)) +* Increase linter timeout ([#230](https://github.com/sourcenetwork/defradb/issues/230)) + +### Chore + +* Remove commented out code ([#238](https://github.com/sourcenetwork/defradb/issues/238)) +* Remove dead code from multi node ([#186](https://github.com/sourcenetwork/defradb/issues/186)) + + + +## [v0.2.0](https://github.com/sourcenetwork/defradb/compare/v0.1.0...v0.2.0) + +> 2022-02-07 + +DefraDB v0.2 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +This release is jam-packed with new features and a small number of breaking changes. Read the full changelog for a detailed description. 
Most notable features include a new Peer-to-Peer (P2P) data synchronization system, an expanded query system to support GroupBy & Aggregate operations, and lastly TimeTraveling queries allowing to query previous states of a document. + +Much more than just that has been added to ensure we're building reliable software expected of any database, such as expanded test & benchmark suites, automated bug detection, performance gains, and more. + +This release does include a Breaking Change to existing v0.1 databases regarding the internal data model, which affects the "Content Identifiers" we use to generate DocKeys and VersionIDs. If you need help migrating an existing deployment, reach out at hello@source.network or join our Discord at https://discord.gg/w7jYQVJ. + +### Features + +* Added Peer-to-Peer networking data synchronization ([#177](https://github.com/sourcenetwork/defradb/issues/177)) +* TimeTraveling (History Traversing) query engine and doc fetcher ([#59](https://github.com/sourcenetwork/defradb/issues/59)) +* Add Document Deletion with a Key ([#150](https://github.com/sourcenetwork/defradb/issues/150)) +* Add support for sum aggregate ([#121](https://github.com/sourcenetwork/defradb/issues/121)) +* Add support for lwwr scalar arrays (full replace on update) ([#115](https://github.com/sourcenetwork/defradb/issues/115)) +* Add count aggregate support ([#102](https://github.com/sourcenetwork/defradb/issues/102)) +* Add support for named relationships ([#108](https://github.com/sourcenetwork/defradb/issues/108)) +* Add multi doc key lookup support ([#76](https://github.com/sourcenetwork/defradb/issues/76)) +* Add basic group by functionality ([#43](https://github.com/sourcenetwork/defradb/issues/43)) +* Update datastore packages to allow use of context ([#48](https://github.com/sourcenetwork/defradb/issues/48)) + +### Bug fixes + +* Only add join if aggregating child object collection ([#188](https://github.com/sourcenetwork/defradb/issues/188)) +* Handle 
errors generated during input object thunks ([#123](https://github.com/sourcenetwork/defradb/issues/123)) +* Remove new types from in-memory cache on generate error ([#122](https://github.com/sourcenetwork/defradb/issues/122)) +* Support relationships where both fields have the same name ([#109](https://github.com/sourcenetwork/defradb/issues/109)) +* Handle errors generated in fields thunk ([#66](https://github.com/sourcenetwork/defradb/issues/66)) +* Ensure OperationDefinition case has at least one selection ([#24](https://github.com/sourcenetwork/defradb/pull/24)) +* Close datastore iterator on scan close ([#56](https://github.com/sourcenetwork/defradb/pull/56)) (resulted in a panic when using limit) +* Close superseded iterators before orphaning ([#56](https://github.com/sourcenetwork/defradb/pull/56)) (fixes a panic in the join code) +* Move discard to after error check ([#88](https://github.com/sourcenetwork/defradb/pull/88)) (did result in panic if transaction creation fails) +* Check for nil iterator before closing document fetcher ([#108](https://github.com/sourcenetwork/defradb/pull/108)) + +### Tooling +* Added benchmark suite ([#160](https://github.com/sourcenetwork/defradb/issues/160)) + +### Documentation + +* Correcting comment typos ([#142](https://github.com/sourcenetwork/defradb/issues/142)) +* Correcting README typos ([#140](https://github.com/sourcenetwork/defradb/issues/140)) + +### Testing + +* Add transaction integration tests ([#175](https://github.com/sourcenetwork/defradb/issues/175)) +* Allow running of tests using badger-file as well as IM options ([#128](https://github.com/sourcenetwork/defradb/issues/128)) +* Add test datastore selection support ([#88](https://github.com/sourcenetwork/defradb/issues/88)) + +### Refactoring + +* Datatype modification protection ([#138](https://github.com/sourcenetwork/defradb/issues/138)) +* Cleanup Linter Complaints and Setup Makefile ([#63](https://github.com/sourcenetwork/defradb/issues/63)) +* Rework 
document rendering to avoid data duplication and mutation ([#68](https://github.com/sourcenetwork/defradb/issues/68)) +* Remove dependency on concrete datastore implementations from db package ([#51](https://github.com/sourcenetwork/defradb/issues/51)) +* Remove all `errors.Wrap` and update them with `fmt.Errorf`. ([#41](https://github.com/sourcenetwork/defradb/issues/41)) +* Restructure integration tests to provide better visibility ([#15](https://github.com/sourcenetwork/defradb/pull/15)) +* Remove schemaless code branches ([#23](https://github.com/sourcenetwork/defradb/pull/23)) + +### Performance +* Add badger multi scan support ([#85](https://github.com/sourcenetwork/defradb/pull/85)) +* Add support for range spans ([#86](https://github.com/sourcenetwork/defradb/pull/86)) + +### Continuous integration + +* Use more accurate test coverage. ([#134](https://github.com/sourcenetwork/defradb/issues/134)) +* Disable Codecov's Patch Check +* Make codecov less strict for now to unblock development ([#125](https://github.com/sourcenetwork/defradb/issues/125)) +* Add codecov config file. ([#118](https://github.com/sourcenetwork/defradb/issues/118)) +* Add workflow that runs a job on AWS EC2 instance. ([#110](https://github.com/sourcenetwork/defradb/issues/110)) +* Add Code Test Coverage with CodeCov ([#116](https://github.com/sourcenetwork/defradb/issues/116)) +* Integrate GitHub Action for golangci-lint Annotations ([#106](https://github.com/sourcenetwork/defradb/issues/106)) +* Add Linter Check to CircleCi ([#92](https://github.com/sourcenetwork/defradb/issues/92)) + +### Chore + +* Remove the S1038 rule of the gosimple linter. 
([#129](https://github.com/sourcenetwork/defradb/issues/129)) +* Update to badger v3, and use badger as default in memory store ([#56](https://github.com/sourcenetwork/defradb/issues/56)) +* Make Cid versions consistent ([#57](https://github.com/sourcenetwork/defradb/issues/57)) + + + +## v0.1.0 + +> 2021-03-15 + diff --git a/CHANGELOG.md b/CHANGELOG.md index 7345a58cc8..48671840d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,89 @@ + +## [v0.12.0](https://github.com/sourcenetwork/defradb/compare/v0.11.0...v0.12.0) + +> 2024-06-28 + +DefraDB v0.12 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.11.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/. + +### Features + +* Ability to generate a new identity ([#2760](https://github.com/sourcenetwork/defradb/issues/2760)) +* Add async transaction callbacks ([#2708](https://github.com/sourcenetwork/defradb/issues/2708)) +* Allow lens runtime selection via config ([#2684](https://github.com/sourcenetwork/defradb/issues/2684)) +* Sec. indexes on relations ([#2670](https://github.com/sourcenetwork/defradb/issues/2670)) +* Add authentication for ACP ([#2649](https://github.com/sourcenetwork/defradb/issues/2649)) +* Inject ACP instance into the DB instance ([#2633](https://github.com/sourcenetwork/defradb/issues/2633)) +* Keyring ([#2557](https://github.com/sourcenetwork/defradb/issues/2557)) +* Enable sec. 
indexes with ACP ([#2602](https://github.com/sourcenetwork/defradb/issues/2602)) + +### Fixes + +* Race condition when testing CLI ([#2713](https://github.com/sourcenetwork/defradb/issues/2713)) +* Remove shared mutable state between database instances ([#2777](https://github.com/sourcenetwork/defradb/issues/2777)) +* Change new identity keys to hex format ([#2773](https://github.com/sourcenetwork/defradb/issues/2773)) +* Return slice of correct length from db.AddSchema ([#2765](https://github.com/sourcenetwork/defradb/issues/2765)) +* Use node representation for Block ([#2746](https://github.com/sourcenetwork/defradb/issues/2746)) +* Add version check in basicTxn.Query ([#2742](https://github.com/sourcenetwork/defradb/issues/2742)) +* Merge retry logic ([#2719](https://github.com/sourcenetwork/defradb/issues/2719)) +* Resolve incorrect merge conflict ([#2723](https://github.com/sourcenetwork/defradb/issues/2723)) +* Keyring output ([#2784](https://github.com/sourcenetwork/defradb/issues/2784)) +* Incorporate schema root into docID ([#2701](https://github.com/sourcenetwork/defradb/issues/2701)) +* Make node options composable ([#2648](https://github.com/sourcenetwork/defradb/issues/2648)) +* Remove limit for fetching secondary docs ([#2594](https://github.com/sourcenetwork/defradb/issues/2594)) + +### Documentation + +* Remove reference to client ping from readme ([#2793](https://github.com/sourcenetwork/defradb/issues/2793)) +* Add http/openapi documentation & ci workflow ([#2678](https://github.com/sourcenetwork/defradb/issues/2678)) +* Streamline cli documentation ([#2646](https://github.com/sourcenetwork/defradb/issues/2646)) +* Document Event Update struct ([#2598](https://github.com/sourcenetwork/defradb/issues/2598)) + +### Refactoring + +* Use events to test network logic ([#2700](https://github.com/sourcenetwork/defradb/issues/2700)) +* Change local_acp implementation to use acp_core ([#2691](https://github.com/sourcenetwork/defradb/issues/2691)) +* Rework 
definition validation ([#2720](https://github.com/sourcenetwork/defradb/issues/2720)) +* Extract definition stuff from collection.go ([#2706](https://github.com/sourcenetwork/defradb/issues/2706)) +* Change counters to support encryption ([#2698](https://github.com/sourcenetwork/defradb/issues/2698)) +* DAG sync and move merge outside of net package ([#2658](https://github.com/sourcenetwork/defradb/issues/2658)) +* Replace subscription events publisher ([#2686](https://github.com/sourcenetwork/defradb/issues/2686)) +* Extract Defra specific logic from ACPLocal type ([#2656](https://github.com/sourcenetwork/defradb/issues/2656)) +* Change from protobuf to cbor for IPLD ([#2604](https://github.com/sourcenetwork/defradb/issues/2604)) +* Reorganize global CLI flags ([#2615](https://github.com/sourcenetwork/defradb/issues/2615)) +* Move internal packages to internal dir ([#2599](https://github.com/sourcenetwork/defradb/issues/2599)) + +### Testing + +* Remove duplicate test ([#2787](https://github.com/sourcenetwork/defradb/issues/2787)) +* Support asserting on doc index in test results ([#2786](https://github.com/sourcenetwork/defradb/issues/2786)) +* Allow test harness to execute benchmarks ([#2740](https://github.com/sourcenetwork/defradb/issues/2740)) +* Add relation substitute mechanic to tests ([#2682](https://github.com/sourcenetwork/defradb/issues/2682)) +* Test node pkg constructor via integration test suite ([#2641](https://github.com/sourcenetwork/defradb/issues/2641)) + +### Continuous integration + +* Cache dependencies to speed up test runs ([#2732](https://github.com/sourcenetwork/defradb/issues/2732)) + +### Bot + +* Update dependencies (bulk dependabot PRs) 24-06-2024 ([#2761](https://github.com/sourcenetwork/defradb/issues/2761)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.13.0 to 7.13.1 in /playground ([#2733](https://github.com/sourcenetwork/defradb/issues/2733)) +* Bump 
[@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.13.0 to 7.13.1 in /playground ([#2734](https://github.com/sourcenetwork/defradb/issues/2734)) +* Update dependencies (bulk dependabot PRs) 06-17-2024 ([#2730](https://github.com/sourcenetwork/defradb/issues/2730)) +* Bump braces from 3.0.2 to 3.0.3 in /playground ([#2716](https://github.com/sourcenetwork/defradb/issues/2716)) +* Update dependencies (bulk dependabot PRs) 06-10-2024 ([#2705](https://github.com/sourcenetwork/defradb/issues/2705)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.11.0 to 7.12.0 in /playground ([#2675](https://github.com/sourcenetwork/defradb/issues/2675)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.11.0 to 7.12.0 in /playground ([#2676](https://github.com/sourcenetwork/defradb/issues/2676)) +* Update dependencies (bulk dependabot PRs) 03-06-2024 ([#2674](https://github.com/sourcenetwork/defradb/issues/2674)) +* Update dependencies (bulk dependabot PRs) 01-06-2024 ([#2660](https://github.com/sourcenetwork/defradb/issues/2660)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.9.0 to 7.10.0 in /playground ([#2635](https://github.com/sourcenetwork/defradb/issues/2635)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.9.0 to 7.10.0 in /playground ([#2637](https://github.com/sourcenetwork/defradb/issues/2637)) +* Bump swagger-ui-react from 5.17.10 to 5.17.12 in /playground ([#2636](https://github.com/sourcenetwork/defradb/issues/2636)) +* Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 ([#2607](https://github.com/sourcenetwork/defradb/issues/2607)) +* Update dependencies (bulk dependabot PRs) 05-20-2024 ([#2631](https://github.com/sourcenetwork/defradb/issues/2631)) +* Update dependencies (bulk dependabot PRs) 05-14-2024 ([#2617](https://github.com/sourcenetwork/defradb/issues/2617)) ## 
[v0.11.0](https://github.com/sourcenetwork/defradb/compare/v0.10.0...v0.11.0) diff --git a/licenses/BSL.txt b/licenses/BSL.txt index 64d1d657d6..62ce0655d2 100644 --- a/licenses/BSL.txt +++ b/licenses/BSL.txt @@ -28,7 +28,7 @@ Additional Use Grant: You may only use the Licensed Work for the -Change Date: 2028-05-03 +Change Date: 2028-06-28 Change License: Apache License, Version 2.0