From 441252a69004c5b5af4d597baafb6d9d3c605882 Mon Sep 17 00:00:00 2001 From: "m.kindritskiy" Date: Mon, 16 Sep 2024 19:03:52 +0300 Subject: [PATCH] fix docs all over the place - fix code inclusions - fix syntax for some codeblocks --- docs/basics.rst | 8 +- docs/basics/test_stage1.py | 2 +- docs/caching.rst | 10 +- docs/changelog/index.rst | 1 + docs/endpoints/async_example.py | 26 ++++ docs/endpoints/sync_example.py | 26 ++++ docs/enums.rst | 104 ++++++++-------- docs/extensions.rst | 209 ++++++++++++++++++++++++++++++-- docs/federation.rst | 20 +-- docs/graphql.rst | 192 +++++++++++++++++++++-------- docs/index.rst | 1 + docs/interfaces.rst | 24 ++-- docs/scalars.rst | 8 +- docs/schema.rst | 91 ++++++++++++++ docs/subgraph.rst | 2 +- docs/telemetry.rst | 8 +- docs/unions.rst | 20 +-- 17 files changed, 586 insertions(+), 166 deletions(-) create mode 100644 docs/endpoints/async_example.py create mode 100644 docs/endpoints/sync_example.py create mode 100644 docs/schema.rst diff --git a/docs/basics.rst b/docs/basics.rst index 12cb81f7..18fd086a 100644 --- a/docs/basics.rst +++ b/docs/basics.rst @@ -30,7 +30,7 @@ This is the simplest :py:class:`~hiku.graph.Graph` with one Then this field could be queried using this query: -.. code-block:: +.. code-block:: graphql { now } @@ -98,7 +98,7 @@ node. This function should return character ids. So now you are able to try this query in the console: -.. code-block:: +.. code-block:: graphql { characters { name species } } @@ -163,8 +163,8 @@ finite structure and result follows it's structure. :lines: 106-127 :dedent: 4 :linenos: - :emphasize-lines: 11,13,15 + :emphasize-lines: 9,11,13 -As you can see, there are duplicate entries in the result :sup:`[11,13,15]` -- +As you can see, there are duplicate entries in the result :sup:`[9,11,13]` -- this is how our cycle can be seen, the same character `Spock` seen multiple times. diff --git a/docs/basics/test_stage1.py b/docs/basics/test_stage1.py index 8074af93..75ebf255 100644 --- a/docs/basics/test_stage1.py +++ b/docs/basics/test_stage1.py @@ -35,7 +35,7 @@ def execute(graph, query_string): @patch('{}.datetime'.format(__name__)) def test(dt): dt.now = Mock(return_value=_NOW) - result = execute(GRAPH, '[:now]') + result = execute(GRAPH, '{ now }') assert result == {'now': '2015-10-21T07:28:00'} # console diff --git a/docs/caching.rst b/docs/caching.rst index 07df93f9..8af85f71 100644 --- a/docs/caching.rst +++ b/docs/caching.rst @@ -49,7 +49,7 @@ Consider this graph: So in next 3 queries `company` node will be cached separately: -.. code-block:: +.. code-block:: graphql query { product(id: 1) { @@ -63,7 +63,7 @@ So in next 3 queries `company` node will be cached separately: ``company`` here has ``name`` field, which is the difference between this query and previous one. -.. code-block:: +.. code-block:: graphql query { product(id: 1) { @@ -79,7 +79,7 @@ previous one. previous one. -.. code-block:: +.. code-block:: graphql query { product(id: 1) { @@ -103,7 +103,7 @@ When we fetching data from cache, we basically restoring parts of index from cac For example, if we have this query: -.. code-block:: +.. code-block:: graphql query { products { @@ -301,7 +301,7 @@ How to specify cache on client Use `@cached` directive on any non root node. -.. code-block:: +.. code-block:: graphql query Products { products { diff --git a/docs/changelog/index.rst b/docs/changelog/index.rst index da4b29a3..88a18992 100644 --- a/docs/changelog/index.rst +++ b/docs/changelog/index.rst @@ -4,6 +4,7 @@ Changelog .. 
toctree::
   :maxdepth: 2
 
+   changes_08
    changes_07
    changes_06
    changes_05
diff --git a/docs/endpoints/async_example.py b/docs/endpoints/async_example.py
new file mode 100644
index 00000000..a63fc876
--- /dev/null
+++ b/docs/endpoints/async_example.py
@@ -0,0 +1,26 @@
+from hiku.types import String
+from hiku.graph import Graph, Root, Field
+from hiku.schema import Schema
+from hiku.endpoint.graphql import AsyncGraphQLEndpoint
+from hiku.executors.asyncio import AsyncIOExecutor
+
+async def say_hello(fields):
+    return ['Hello World!' for _ in fields]
+
+QUERY_GRAPH = Graph([
+    Root([Field('hello', String, say_hello)]),
+])
+
+schema = Schema(AsyncIOExecutor(), QUERY_GRAPH)
+
+endpoint = AsyncGraphQLEndpoint(schema)
+
+assert await endpoint.dispatch({
+    'query': "{ hello }",
+    'variables': None,
+    'operationName': "GetHello",
+}) == {
+    'data': {
+        'hello': 'Hello World!',
+    },
+}
diff --git a/docs/endpoints/sync_example.py b/docs/endpoints/sync_example.py
new file mode 100644
index 00000000..252492fa
--- /dev/null
+++ b/docs/endpoints/sync_example.py
@@ -0,0 +1,26 @@
+from hiku.graph import Graph, Root, Field
+from hiku.types import String
+from hiku.schema import Schema
+from hiku.executors.sync import SyncExecutor
+from hiku.endpoint.graphql import GraphQLEndpoint
+
+def say_hello(fields):
+    return ['Hello World!' for _ in fields]
+
+QUERY_GRAPH = Graph([
+    Root([Field('hello', String, say_hello)]),
+])
+
+schema = Schema(SyncExecutor(), QUERY_GRAPH)
+
+endpoint = GraphQLEndpoint(schema)
+
+assert endpoint.dispatch({
+    'query': "{ hello }",
+    'variables': None,
+    'operationName': "GetHello",
+}) == {
+    'data': {
+        'hello': 'Hello World!',
+    },
+}
diff --git a/docs/enums.rst b/docs/enums.rst
index f50087a8..d9f84b10 100644
--- a/docs/enums.rst
+++ b/docs/enums.rst
@@ -5,16 +5,16 @@ Enums
 Enums are a special types that can be used to define a set of possible values
 for a field.
 
-In graphql you can use enum type like this:
+In GraphQL you can define an enum type and use it like this:
 
-.. code-block::
+.. code-block:: graphql
 
     enum Status {
         ACTIVE
         DELETED
     }
 
-    type Usr {
+    type User {
         id: ID!
         status: Status!
     }
@@ -24,73 +24,90 @@ In graphql you can use enum type like this:
     }
 
 
-Enum from string
-----------------
+Define enum
+-----------
 
-The simplest way to create enum in `hiku` from a list of strings:
+.. tab:: Enum from string
 
-.. code-block:: python
+    The simplest way to create an enum in `hiku` is from a list of strings:
 
-    from hiku.enum import Enum
+    .. code-block:: python
 
-    Enum('Status', ['ACTIVE', 'DELETED'])
+        from hiku.enum import Enum
 
-Enum from python Enum
----------------------
+        Enum('Status', ['ACTIVE', 'DELETED'])
 
-You can also create enum from builtin python ``Enum`` type:
+.. tab:: Enum from python Enum
 
-.. code-block:: python
+    You can also create an enum from the builtin python ``Enum`` type:
 
-    from enum import Enum as PyEnum
-    from hiku.enum import Enum
+    .. code-block:: python
 
-    class Status(PyEnum):
-        ACTIVE = 'active'
-        DELETED = 'deleted'
+        from enum import Enum as PyEnum
+        from hiku.enum import Enum
 
-    Enum.from_builtin(Status)
+        class Status(PyEnum):
+            ACTIVE = 'active'
+            DELETED = 'deleted'
 
-In this example:
+        Enum.from_builtin(Status)
 
-- ``EnumFromBuiltin`` will use ``Enum.__name__`` as a enum name.
-- ``EnumFromBuiltin`` will use ``Enum.__members__`` to get a list of possible values.
-- ``EnumFromBuiltin`` will use ``member.name`` to get a value name:
+    In this example:
 
-So this python enum:
+    - ``EnumFromBuiltin`` will use ``Enum.__name__`` as an enum name.
+    - ``EnumFromBuiltin`` will use ``Enum.__members__`` to get a list of possible values.
+    - ``EnumFromBuiltin`` will use ``member.name`` to get a value name.
 
-.. code-block:: python
+    So this python enum:
 
-    class Status(PyEnum):
-        ACTIVE = 1
-        DELETED = 2
+    .. code-block:: python
 
-is equivalent to this enum in graphql:
+        class Status(PyEnum):
+            ACTIVE = 1
+            DELETED = 2
 
-.. code-block:: python
+    is equivalent to this enum in graphql:
 
-    enum Status { ACTIVE, DELETED }
+    .. code-block:: graphql
 
-If you want to specify different name you can pass ``name`` argument to ``Enum.from_builtin`` method:
+        enum Status { ACTIVE, DELETED }
 
-.. code-block:: python
+    If you want to specify a different name you can pass the ``name`` argument to the ``Enum.from_builtin`` method:
 
-    Enum.from_builtin(Status, name='User_Status')
+    .. code-block:: python
 
-.. note::
+        Enum.from_builtin(Status, name='User_Status')
 
-    If you use builtin python ``Enum``, then you MUST return enum value from the resolver function, otherwise ``hiku`` will raise an error.
+    .. note::
 
-    .. code-block:: python
+        If you use the builtin python ``Enum``, then you MUST return an enum value from the resolver function, otherwise ``hiku`` will raise an error.
 
-        def user_fields_resolver(fields, ids):
-            def get_field(field, user):
-                if field.name == 'id':
-                    return user.id
-                elif field.name == 'status':
-                    return Status(user.status)
+        .. code-block:: python
 
-            return [[get_field(field, users[id]) for field in fields] for id in ids]
+            def user_fields_resolver(fields, ids):
+                def get_field(field, user):
+                    if field.name == 'id':
+                        return user.id
+                    elif field.name == 'status':
+                        return Status(user.status)
+
+                return [[get_field(field, users[id]) for field in fields] for id in ids]
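+
+With a plain string-based ``Enum`` there is no such restriction -- the resolver
+can return the value name as a plain string. A minimal sketch (the wiring
+around ``get_field`` mirrors the note above):
+
+.. code-block:: python
+
+    def get_field(field, user):
+        if field.name == 'id':
+            return user.id
+        elif field.name == 'status':
+            # one of the names the Enum was defined with
+            return 'ACTIVE'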
+
+Use enum
+--------
 
 Lets look at the full example on how to use enum type in `hiku`:
@@ -138,7 +155,7 @@ Lets decode the example above:
 
 If we run this query:
 
-.. code-block:: python
+.. code-block:: graphql
 
     query {
         user {
@@ -149,11 +168,11 @@ If we run this query:
 
 We will get the following result:
 
-.. code-block::
+.. code-block:: json
 
     {
-        'id': "1",
-        'status': 'ACTIVE',
+        "id": "1",
+        "status": "ACTIVE"
     }
 
 
@@ -247,7 +264,7 @@ You can use enum as an field input argument:
 
 Now you can use enum as a field argument:
 
-.. code-block::
+.. code-block:: graphql
 
     query {
         users(status: DELETED) {
@@ -258,7 +275,7 @@ Now you can use enum as a field argument:
 
 The result will be:
 
-.. code-block::
+.. code-block:: json
 
     [{
         "id": "2",
diff --git a/docs/extensions.rst b/docs/extensions.rst
index 2cfc3d4c..e1abd58b 100644
--- a/docs/extensions.rst
+++ b/docs/extensions.rst
@@ -18,19 +18,23 @@ Here are all the methods that can be implemented:
 - :meth:`~hiku.extensions.Extension.on_validate` - when query is validated
 - :meth:`~hiku.extensions.Extension.on_execute` - when query is executed by engine
 
-Built-in extensions
-~~~~~~~~~~~~~~~~~~~
+Custom extension
+----------------
 
-- ``QueryParseCache`` - cache parsed graphql queries ast.
-- ``QueryValidationCache`` - cache query validation.
-- ``QueryDepthValidator`` - validate query depth
-- ``PrometheusMetrics`` - wrapper around ``GraphMetrics`` visitor
-- ``PrometheusMetricsAsync`` - wrapper around ``AsyncGraphMetrics`` visitor
-- ``CustomContext`` - allows to pass custom context to the query execution
+To write your own extension you need to inherit from :class:`hiku.extensions.Extension`
+and implement the methods that you need.
 
-Writing extension
-~~~~~~~~~~~~~~~~~
+.. note::
+
+    The **Extension** class accepts an **execution_context** argument, but it is optional.
+
+    You can pass either an instance or a class of the Extension to the schema's `extensions` argument.
+
+    At runtime, if the extension is a class, it will be instantiated with the **execution_context** argument.
+    If it is an instance, its **execution_context** attribute will be set.
+
+
+Here is an example of a custom extension that measures query execution time:
 
 .. code-block:: python
 
@@ -45,4 +49,224 @@ Writing extension
         yield
         print('Query execution took {:.3f} seconds'.format(time.perf_counter() - start))
 
-    endpoint = GraphqlEndpoint(engine, graph, extensions=[TimeItExtension()])
\ No newline at end of file
+    schema = Schema(graph, extensions=[TimeItExtension()])
+
+In this example we use :class:`hiku.extensions.Extension` as a base class, and implement
+the :meth:`~hiku.extensions.Extension.on_execute` method.
+
+This method is called when the query is executed by the engine. So we measure the time
+before and after the execution and print the result.
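+
+Since an extension can be passed either way, both forms are equivalent
+(a small sketch reusing the ``TimeItExtension`` defined above):
+
+.. code-block:: python
+
+    # instantiated by hand
+    schema = Schema(graph, extensions=[TimeItExtension()])
+
+    # instantiated by hiku with the current execution context
+    schema = Schema(graph, extensions=[TimeItExtension])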
+
+Built-in extensions
+-------------------
+
+QueryParseCache
+~~~~~~~~~~~~~~~
+
+Hiku uses the ``graphql-core`` library to parse queries, but parsing the same query again and again is a waste of resources and time.
+
+Hiku provides a way to cache parsed queries. To enable it, you need to use the ``QueryParseCache`` extension.
+
+.. code-block:: python
+
+    schema = Schema(
+        graph,
+        extensions=[QueryParseCache(maxsize=50)],
+    )
+
+Note that for the cache to be effective, you need to separate query and variables, otherwise
+the cache will be useless.
+
+Query with inlined variables is bad for caching.
+
+.. code-block:: graphql
+
+    query User {
+        user(id: 1) {
+            name
+            photo(size: 50)
+        }
+    }
+
+Query with separated variables is good for caching.
+
+.. code-block:: graphql
+
+    query User($id: ID!, $photoSize: Int) {
+        user(id: $id) {
+            name
+            photo(size: $photoSize)
+        }
+    }
+
+**QueryParseCache** exposes metrics for query parsing time:
+
+.. code-block:: python
+
+    Gauge('hiku_query_cache_hits', 'Query cache hits')
+    Gauge('hiku_query_cache_misses', 'Query cache misses')
+
+QueryTransformCache
+~~~~~~~~~~~~~~~~~~~
+
+Just like ``QueryParseCache``, ``QueryTransformCache`` caches the result of the transformation from the GraphQL AST into the query :py:class:`hiku.query.Node`.
+
+QueryValidationCache
+~~~~~~~~~~~~~~~~~~~~
+
+``QueryValidationCache`` caches the query validation result.
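+
+The cache extensions can be combined. A sketch (assuming the transform and
+validation caches accept the same ``maxsize`` argument as ``QueryParseCache``):
+
+.. code-block:: python
+
+    schema = Schema(
+        graph,
+        extensions=[
+            QueryParseCache(maxsize=50),
+            QueryTransformCache(maxsize=50),
+            QueryValidationCache(maxsize=50),
+        ],
+    )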
+
+QueryDepthValidator
+~~~~~~~~~~~~~~~~~~~
+
+``QueryDepthValidator`` validates the query depth. If it is greater than the ``max_depth`` argument, it returns an error
+which says that the query depth is too big.
+
+.. code-block:: python
+
+    schema = Schema(
+        graph,
+        extensions=[QueryDepthValidator(max_depth=10)],
+    )
+
+PrometheusMetrics
+~~~~~~~~~~~~~~~~~
+
+``PrometheusMetrics`` is a wrapper around the ``GraphMetrics`` visitor. It exposes metrics for query execution time.
+
+.. code-block:: python
+
+    from hiku.extensions.prometheus import PrometheusMetrics
+    schema = Schema(
+        graph,
+        extensions=[PrometheusMetrics('user_graph')],
+    )
+
+Custom metric
+"""""""""""""
+
+By default, ``PrometheusMetrics`` uses the built-in metric ``graph_field_time``:
+
+.. code-block:: python
+
+    Summary("graph_field_time", "Graph field time (seconds)", ["graph", "node", "field"])
+
+But you can pass your own metric to ``PrometheusMetrics`` by using the ``metric`` argument:
+
+.. code-block:: python
+
+    from prometheus_client import Histogram
+    from hiku.extensions.prometheus import PrometheusMetrics
+
+    metric = Histogram("my_custom_metric", "Graph field time (seconds)", ["graph", "node", "field"])
+
+    schema = Schema(
+        graph,
+        extensions=[PrometheusMetrics('user_graph', metric=metric)],
+    )
+
+Custom labels
+"""""""""""""
+
+``PrometheusMetrics`` has a ``ctx_var`` argument, which allows you to pass a custom ``ContextVar`` variable
+that will be set to **execution_context.context**. This context can be used, for example, to expose different labels.
+
+Here we are adding a new label ``os`` to the metric, and we want to use the ``os`` value from the context:
+
+.. code-block:: python
+
+    from prometheus_client import Histogram
+    from contextvars import ContextVar
+    from hiku.extensions.prometheus import PrometheusMetrics
+
+    metric = Histogram("my_custom_metric", "Graph field time (seconds)", ["graph", "node", "field", "os"])
+    metrics_ctx = ContextVar('os')
+
+    class CustomGraphMetrics(GraphMetrics):
+        def get_labels(
+            self, graph_name: str, node_name: str, field_name: str, ctx: dict
+        ) -> list:
+            return [graph_name, node_name, field_name, ctx.get('os', 'unknown')]
+
+    schema = Schema(
+        graph,
+        extensions=[
+            PrometheusMetrics(
+                'user_graph',
+                metric=metric,
+                ctx_var=metrics_ctx,
+                transformer_cls=CustomGraphMetrics
+            )
+        ],
+    )
+
+    endpoint = GraphQLEndpoint(schema)
+
+    @app.post('/graphql')
+    def graphql(request: Request):
+        os = get_os(request)
+        return endpoint.dispatch(request.body, context={'os': os})
+
+PrometheusMetricsAsync
+~~~~~~~~~~~~~~~~~~~~~~
+
+``PrometheusMetricsAsync`` is a wrapper around the ``AsyncGraphMetrics`` visitor. It exposes metrics for query execution time.
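+
+A minimal usage sketch, mirroring ``PrometheusMetrics`` above (assuming
+``PrometheusMetricsAsync`` is importable from the same module and takes the same arguments):
+
+.. code-block:: python
+
+    from hiku.extensions.prometheus import PrometheusMetricsAsync
+
+    schema = Schema(
+        graph,  # a graph executed with AsyncIOExecutor
+        extensions=[PrometheusMetricsAsync('user_graph')],
+    )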
+
+CustomContext
+~~~~~~~~~~~~~
+
+``CustomContext`` allows you to define a custom context for query execution.
+
+If you do not want to pass the context to the `dispatch` method on every query, you can use the :py:class:`hiku.extensions.context.CustomContext` extension,
+which accepts a callback function that will be called on every query execution and should return a context object:
+
+.. code-block:: python
+
+    db = Database()
+
+    def get_context(execution_context: ExecutionContext) -> dict:
+        return {'db': db}
+
+    schema = Schema(
+        graph,
+        extensions=[CustomContext(get_context)]
+    )
+
+    result = schema.execute_sync(query)
diff --git a/docs/federation.rst b/docs/federation.rst
index 3f38510c..f28caa21 100644
--- a/docs/federation.rst
+++ b/docs/federation.rst
@@ -1,8 +1,8 @@
-Federation
-==========
+Apollo Federation
+=================
 
-What is Federation
-------------------
+What is Apollo Federation
+-------------------------
 
 Apollo Federation is a set of open-source tools that allow you to compose multiple GraphQL
 services into a single data graph.
@@ -39,7 +39,7 @@ Let's start with a simple example of a federated subgraph using the following Gr
 
 Order Service
 
-.. code-block::
+.. code-block:: graphql
 
     type Order @key(fields: "id") {
         id: ID!
@@ -53,7 +53,7 @@ Order Service
 
 Shopping Cart Service
 
-.. code-block::
+.. code-block:: graphql
 
     type ShoppingCart @key(fields: "id") {
         id: ID!
@@ -109,7 +109,7 @@ Now let's implement the Order service using Hiku:
     def handle_graphql():
         data = request.get_json()
         result = schema.execute_sync(data)
-        resp = jsonify(result)
+        resp = jsonify({"data": result.data})
         return resp
 
     if __name__ == '__main__':
@@ -166,7 +166,7 @@ Next, let's implement the ShoppingCart service using Hiku:
     def handle_graphql():
         data = request.get_json()
         result = schema.execute_sync(data)
-        resp = jsonify(result)
+        resp = jsonify({"data": result.data})
         return resp
 
     if __name__ == '__main__':
@@ -216,7 +216,7 @@ With the composed schema, we can now start the router:
 
 With the router running, visit http://localhost:4000 and try running the following query:
 
-.. code-block::
+.. code-block:: graphql
 
     {
         order(id: 1) {
@@ -231,4 +231,4 @@ With the router running, visit http://localhost:4000 and try running the followi
             }
         }
     }
-    }
\ No newline at end of file
+    }
diff --git a/docs/graphql.rst b/docs/graphql.rst
index e7279969..691485d8 100644
--- a/docs/graphql.rst
+++ b/docs/graphql.rst
@@ -4,6 +4,13 @@ Using GraphQL
 .. note:: Hiku is a general-purpose library to expose data as a graph of linked
    nodes. And it is possible to implement GraphQL server using Hiku.
 
+In order to parse GraphQL queries you will need to install the ``graphql-core``
+library:
+
+.. code-block:: shell
+
+    $ pip install graphql-core
+
 To implement GraphQL server we will have to add GraphQL introspection into our
 graph and to add GraphQL query execution process:
 
@@ -13,8 +20,22 @@ graph and to add GraphQL query execution process:
 - denormalize result into simple data structure
 - serialize result and send back to the client
 
-Graph Definition
-~~~~~~~~~~~~~~~~
+Query
+~~~~~
+
+To create a ``Query`` graph:
+
+.. code-block:: python
+
+    query_graph = Graph([
+        Root([
+            Field('value', String, value_func),
+        ]),
+    ])
+
+
+Mutation
+~~~~~~~~
 
 GraphQL schema may have several root object types for each operation type:
 query, mutation, subscription... Hiku has only one :py:class:`~hiku.graph.Root`
@@ -37,6 +58,148 @@ identical to the query graph, except :py:class:`~hiku.graph.Root` node:
 ])
 
+Schema
+~~~~~~
+
+In order to expose a Hiku graph as a GraphQL schema, you will need to create a ``Schema`` object:
+
+.. code-block:: python
+
+    from hiku.graph import Graph, Root, Field
+    from hiku.types import String
+    from hiku.schema import Schema
+    from hiku.executors.sync import SyncExecutor
+
+    def value_func(*_):
+        return 'Hello, World!'
+
+    graph = Graph([
+        Root([
+            Field('value', String, value_func),
+        ]),
+    ])
+
+    schema = Schema(SyncExecutor(), graph)
+
+To learn more about the schema api, see :ref:`Schema docs <schema-doc>`.
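+
+With the schema in place, queries can be executed directly. A small sketch (as
+in the federation examples, ``execute_sync`` is given the GraphQL payload and
+returns a result with a ``data`` attribute):
+
+.. code-block:: python
+
+    result = schema.execute_sync({'query': '{ value }'})
+    assert result.data == {'value': 'Hello, World!'}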
+
+Endpoint
+~~~~~~~~
+
+Hiku has a so-called GraphQL endpoint, which can be used to expose a Hiku graph as an HTTP endpoint.
+
+Here is an example of how to use it:
+
+.. tab:: GraphQLEndpoint
+
+    .. literalinclude:: endpoints/sync_example.py
+        :lines: 1-26
+
+.. tab:: AsyncGraphQLEndpoint
+
+    .. literalinclude:: endpoints/async_example.py
+        :lines: 1-26
+
+Sync endpoint
+"""""""""""""
+
+.. automodule:: hiku.endpoint.graphql
+    :members: GraphQLEndpoint
+    :noindex:
+
+Async endpoint
+"""""""""""""""
+
+.. automodule:: hiku.endpoint.graphql
+    :members: AsyncGraphQLEndpoint
+    :noindex:
+
+Introspection
+"""""""""""""
+
+By default GraphQL introspection is enabled, but you can disable it by setting the ``introspection`` argument to ``False``:
+
+.. code-block:: python
+
+    endpoint = GraphQLEndpoint(schema, introspection=False)
+
+Validation
+""""""""""
+
+By default GraphQL validation is enabled, but you can disable it by setting the ``validation`` argument to ``False``:
+
+.. code-block:: python
+
+    endpoint = GraphQLEndpoint(schema, validation=False)
+
+Context
+"""""""
+
+The GraphQL endpoint accepts a **context** as a second argument, which is passed to the query execution process.
+
+.. code-block:: python
+
+    db = Database()
+    endpoint = GraphQLEndpoint(schema)
+    result = endpoint.dispatch(query, context={'db': db})
+
+If you do not want to pass the context to the ``dispatch`` method on every query, you can use the :py:class:`hiku.extensions.context.CustomContext` extension,
+which accepts a callback function that will be called on every query execution and should return a context object:
+
+.. code-block:: python
+
+    db = Database()
+
+    def get_context(execution_context: ExecutionContext) -> dict:
+        return {'db': db}
+
+    schema = Schema(
+        graph,
+        extensions=[CustomContext(get_context)]
+    )
+
+    endpoint = GraphQLEndpoint(schema)
+    result = endpoint.dispatch(query)
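+
+Inside the graph, a resolver can opt in to receiving this context. A sketch
+(assuming ``pass_context`` from ``hiku.engine``, which passes the context as the first argument):
+
+.. code-block:: python
+
+    from hiku.engine import pass_context
+
+    @pass_context
+    def value_func(ctx, fields):
+        db = ctx['db']
+        ...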
+
+Batching
+""""""""
+
+The GraphQL endpoint has a **batching** option, which is disabled by default. When enabled, the endpoint
+accepts a list of operations and executes them as one batch:
+
+.. code-block:: python
+
+    endpoint = GraphQLEndpoint(schema, batching=True)
+
+    assert endpoint.dispatch([
+        {"query": "{ value }"},
+        {"query": "{ value }"},
+    ]) == [
+        {"data": {"value": "Hello, World!"}},
+        {"data": {"value": "Hello, World!"}},
+    ]
 
 Introspection
 ~~~~~~~~~~~~~
 
@@ -74,13 +237,6 @@ option definitions, as long as they don't have references to nodes.
 Reading
 ~~~~~~~
 
-In order to parse GraphQL queries you will need to install ``graphql-core``
-library:
-
-.. code-block:: shell
-
-    $ pip install graphql-core
-
 There are two options:
 
 - :py:func:`~hiku.readers.graphql.read` simple queries, when only query
@@ -142,47 +298,3 @@ execution result into JSON, it should be denormalized, to replace references
 
 .. _graphql-core: https://github.com/graphql-python/graphql-core
 
-
-Query parsing cache
-~~~~~~~~~~~~~~~~~~~
-
-Hiku uses ``graphql-core`` library to parse queries. But parsing same query again and again is a waste of resources and time.
-
-Hiku provides a way to cache parsed queries. To enable it, you need to use ``QueryParseCache`` extensions.
-
-.. code-block:: python
-
-    endpoint = GraphQLEndpoint(
-        Engine(SyncExecutor()), sync_graph,
-        extensions=[QueryParserCache(maxsize=50)],
-    )
-
-Note than for cache to be effective, you need to separate query and variables, otherwise
-cache will be useless.
-
-Query with inlined variables is bad for caching.
-
-.. code-block:: python
-
-    query User {
-        user(id: 1) {
-            name
-            photo(size: 50)
-        }
-    }
-
-Query with separated variables is good for caching.
-
-.. code-block::
-
-    query User($id: ID!, $photoSize: Int) {
-        user(id: $id) {
-            name
-            photo(size: $photoSize)
-        }
-    }
-
-
-    {
-        "id": 1,
-        "photoSize": 50
-    }
diff --git a/docs/index.rst b/docs/index.rst
index 049129c0..9b9737f3 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -11,6 +11,7 @@ User's Guide
    subgraph
    asyncio
    graphql
+   schema
    scalars
    enums
    interfaces
diff --git a/docs/interfaces.rst b/docs/interfaces.rst
index df8c41dd..e45a76b2 100644
--- a/docs/interfaces.rst
+++ b/docs/interfaces.rst
@@ -9,7 +9,7 @@ Interfaces are useful when you want to define a common set of fields.
 
 In graphql you can use interfaces types like this:
 
-.. code-block::
+.. code-block:: graphql
 
     interface Media {
         id: ID!
@@ -98,7 +98,7 @@ Lets decode the example above:
 
 If we run this query:
 
-.. code-block:: python
+.. code-block:: graphql
 
     query {
         search(text: "test") {
@@ -117,20 +117,20 @@ If we run this query:
 As a result we will get a list of objects with ``__typename``, ``id`` and ``duration`` fields
 and fields that are specific to the type of the object.
 
-.. code-block::
+.. code-block:: json
 
     [
         {
-            '__typename': 'Audio',
-            'id': 1,
-            'duration': '1:20',
-            'album': 'Cool album',
+            "__typename": "Audio",
+            "id": 1,
+            "duration": "1:20",
+            "album": "Cool album"
         },
         {
-            '__typename': 'Video',
-            'id': 2,
-            'duration': '1:40',
-            'thumbnailUrl': 'http://example.com/thumbnail.jpg',
+            "__typename": "Video",
+            "id": 2,
+            "duration": "1:40",
+            "thumbnailUrl": "http://example.com/thumbnail.jpg"
         },
     ]
 
@@ -139,4 +139,4 @@ Type narrowing
 
 Unlike other graphql implementations `hiku` supports type narrowing without
 ``__resolveType`` function. It is possible because `hiku` knows all possible types
-at the link resolution time.
\ No newline at end of file
+at the link resolution time.
diff --git a/docs/scalars.rst b/docs/scalars.rst
index 2955561c..a6639091 100644
--- a/docs/scalars.rst
+++ b/docs/scalars.rst
@@ -134,7 +134,7 @@ Lets decode the example above:
 
 If we run this query:
 
-.. code-block::
+.. code-block:: graphql
 
     query {
         user(olderThen: "2023-06-15") {
@@ -145,10 +145,10 @@ If we run this query:
 
 We will get this result:
 
-.. code-block::
+.. code-block:: json
 
     {
-        'id': "1",
-        'dateCreated': '2023-06-15',
+        "id": "1",
+        "dateCreated": "2023-06-15"
     }
diff --git a/docs/schema.rst b/docs/schema.rst
new file mode 100644
index 00000000..31c98f19
--- /dev/null
+++ b/docs/schema.rst
@@ -0,0 +1,106 @@
+.. _schema-doc:
+
+Schema
+======
+
+To create a schema you will need to define a graph and choose an executor.
+
+.. code-block:: python
+
+    from hiku.graph import Graph, Root, Field
+    from hiku.types import String
+    from hiku.schema import Schema
+    from hiku.executors.sync import SyncExecutor
+
+    def value_func(*_):
+        return 'Hello, World!'
+
+    graph = Graph([
+        Root([
+            Field('value', String, value_func),
+        ]),
+    ])
+
+    schema = Schema(SyncExecutor(), graph)
+
+Executing queries
+-----------------
+
+In order to execute a query, run:
+
+.. code-block:: python
+
+    result = schema.execute_sync(query)
+
+
+Or async execute:
+
+.. code-block:: python
+
+    result = await schema.execute(query)
+
+
+To pass variables to the query:
+
+.. code-block:: python
+
+    result = await schema.execute(query, variables={'foo': 'bar'})
+
+To run a query with a context:
+
+.. code-block:: python
+
+    result = await schema.execute(query, context={"db": db})
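+
+``schema.execute`` is a coroutine, so outside of an event loop it has to be
+driven explicitly. A sketch (assuming a schema built with ``AsyncIOExecutor``;
+the payload form follows the federation examples):
+
+.. code-block:: python
+
+    import asyncio
+
+    async def main():
+        result = await schema.execute({'query': '{ value }'})
+        print(result.data)
+
+    asyncio.run(main())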
+
+Executor
+--------
+
+Schema accepts a :py:class:`hiku.executors.base.BaseExecutor` instance as its first argument.
+
+Schema will be able to execute queries based on the executor you choose.
+
+For example, if you choose a :py:class:`hiku.executors.sync.SyncExecutor`, you will be able to execute queries synchronously:
+
+.. code-block:: python
+
+    result = schema.execute_sync({'value': None})
+    print(result)
+
+If you choose a :py:class:`hiku.executors.asyncio.AsyncIOExecutor`, you will be able to execute queries asynchronously:
+
+.. code-block:: python
+
+    result = await schema.execute({'value': None})
+    print(result)
+
+.. note::
+
+    It is not recommended to use sync executors with the async execute method and vice versa, unless you know what you are doing.
+
+    In other words, do not use ``SyncExecutor`` with :py:meth:`hiku.schema.Schema.execute`
+    or ``AsyncIOExecutor`` with :py:meth:`hiku.schema.Schema.execute_sync`.
+
+
+Extensions
+----------
+
+Schema accepts a list of extensions passed to the ``extensions`` argument:
+
+.. code-block:: python
+
+    schema = Schema(SyncExecutor(), graph, extensions=[CustomExtension()])
diff --git a/docs/subgraph.rst b/docs/subgraph.rst
index 41b91b07..7d0281eb 100644
--- a/docs/subgraph.rst
+++ b/docs/subgraph.rst
@@ -55,7 +55,7 @@ So let's query some data, needed to show characters with their photos:
 
 What's wrong with this query?
 
-.. code-block::
+.. code-block:: graphql
 
     { characters { name image { id name } } }
diff --git a/docs/telemetry.rst b/docs/telemetry.rst
index f8cb29f2..e9a9774c 100644
--- a/docs/telemetry.rst
+++ b/docs/telemetry.rst
@@ -40,12 +40,10 @@ Default metric:
     )
 
 
-Query cache metrics
-~~~~~~~~~~~~~~~~~~~
+Query parse cache metrics
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
-It is possible to enable query cache. That means that the same query will be parsed only once.
-
-When query cache is enabled, the following metrics are exposed:
+**QueryParseCache** exposes metrics for query parsing time:
 
 .. code-block:: python
diff --git a/docs/unions.rst b/docs/unions.rst
index dd023906..b3a1a164 100644
--- a/docs/unions.rst
+++ b/docs/unions.rst
@@ -9,7 +9,7 @@ Union types are special types used to represent a value that could be one of typ
 
 In graphql you can use union types like this:
 
-.. code-block::
+.. code-block:: graphql
 
     type Audio {
         id: ID!
@@ -80,7 +80,7 @@ Lets look at the example above:
 
 Now lets look at the query:
 
-.. code-block:: python
+.. code-block:: graphql
 
     query {
         search(text: "test") {
@@ -99,18 +99,18 @@ Now lets look at the query:
 As a result of the query above you will get a list of objects with `__typename` and `id` fields
 and fields that are specific to the type of the object.
 
-.. code-block::
+.. code-block:: json
 
     [
         {
-            '__typename': 'Audio',
-            'id': 1,
-            'duration': 100,
+            "__typename": "Audio",
+            "id": 1,
+            "duration": 100
        },
        {
-            '__typename': 'Video',
-            'id': 2,
-            'thumbnailUrl': 'http://example.com/thumbnail.jpg',
+            "__typename": "Video",
+            "id": 2,
+            "thumbnailUrl": "http://example.com/thumbnail.jpg"
        },
    ]
 
@@ -119,4 +119,4 @@ Type narrowing
 
 Unlike other graphql implementations `hiku` supports type narrowing without
 ``__resolveType`` function. It is possible because `hiku` knows all possible types
-at the link resolution time.
\ No newline at end of file
+at the link resolution time.