diff --git a/.flake8 b/.flake8 index b2424ca..1609f68 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 80 exclude = *_pb2.py,.tox,.git,env,docs,.venv,__pypackages__,tests -extend-ignore = E203 +extend-ignore = E203,E231 ignore = F811, W503, diff --git a/docs/changelog/changes_08.rst b/docs/changelog/changes_08.rst index a919fe9..fdd98b1 100644 --- a/docs/changelog/changes_08.rst +++ b/docs/changelog/changes_08.rst @@ -20,6 +20,11 @@ Changes in 0.8 - Change `GraphQLResponse` type - it now has both `data` and `errors` fields - Rename `on_dispatch` hook to `on_operation` - Remove old `on_operation` hook + - Remove `execute` method from `BaseGraphQLEndpoint` class + - Add `process_result` method to `BaseGraphQLEndpoint` class + - Move `GraphQLError` to `hiku.error` module + - Drop `GraphQLError.errors` field. Earlier we used to store multiple errors in a single `GraphQLError` but now it's one message - one `GraphQLError`. + - Add `GraphQLError.message` field Backward-incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -29,3 +34,7 @@ Backward-incompatible changes - Drop `hiku.federation.endpoint` - use `hiku.endpoint` instead - Drop `hiku.federation.denormalize` - Drop `hiku.federation.engine` - use `hiku.engine` instead + - Remove `execute` method from `BaseGraphQLEndpoint` class + - Move `GraphQLError` to `hiku.error` module + - Drop `GraphQLError.errors` field + - Add `GraphQLError.message` field diff --git a/hiku/builder.py b/hiku/builder.py index fac08cf..2ad8c97 100644 --- a/hiku/builder.py +++ b/hiku/builder.py @@ -15,7 +15,7 @@ def __init__( def __getattr__(self, name): assert self.__field_name__ is None, self.__field_name__ - return self.__class__(name) + return self.__class__(name, mutation=self.__mutation__) def __getitem__(self, items): assert self.__node_items__ is None, self.__node_items__ @@ -36,6 +36,7 @@ def __lshift__(self, other): other.__field_options__, self.__field_name__, # alias other.__node_items__, + 
other.__mutation__, ) def __call__(self, **options): @@ -43,6 +44,11 @@ def __call__(self, **options): self.__field_options__ = options return self + def __repr__(self) -> str: + return "Handle[{}]({})".format( + "Q" if not self.__mutation__ else "M", id(self) + ) + # Q for query Q = Handle() diff --git a/hiku/context.py b/hiku/context.py index da0706b..027a24f 100644 --- a/hiku/context.py +++ b/hiku/context.py @@ -111,10 +111,10 @@ def create_execution_context( elif isinstance(query, Node): query_node = query if "operation" not in kwargs: - kwargs["operation"] = Operation( - OperationType.QUERY, - query, - ) + op_type = OperationType.QUERY + if query.ordered: + op_type = OperationType.MUTATION + kwargs["operation"] = Operation(op_type, query) return ExecutionContext( query_src=query_src or "", diff --git a/hiku/endpoint/graphql.py b/hiku/endpoint/graphql.py index baba31b..dee2f37 100644 --- a/hiku/endpoint/graphql.py +++ b/hiku/endpoint/graphql.py @@ -4,8 +4,8 @@ from abc import ABC from asyncio import gather - -from hiku.schema import ExecutionResult, GraphQLError, Schema +from hiku.error import GraphQLError +from hiku.schema import ExecutionResult, Schema class GraphQLErrorObject(TypedDict): @@ -20,7 +20,7 @@ class GraphQLRequest(TypedDict, total=False): class GraphQLResponse(TypedDict, total=False): data: Optional[Dict[str, object]] - errors: Optional[List[object]] + errors: Optional[List[GraphQLErrorObject]] extensions: Optional[Dict[str, object]] @@ -47,8 +47,8 @@ def __init__( def process_result(self, result: ExecutionResult) -> GraphQLResponse: data: GraphQLResponse = {"data": result.data} - if result.error: - data["errors"] = [{"message": e} for e in result.error.errors] + if result.errors: + data["errors"] = [{"message": e.message} for e in result.errors] return data @@ -107,7 +107,7 @@ def dispatch( ) -> SingleOrBatchedResponse: if isinstance(data, list): if not self.batching: - raise GraphQLError(errors=["Batching is not supported"]) + raise 
GraphQLError("Batching is not supported") return [ super(GraphQLEndpoint, self).dispatch(item, context) diff --git a/hiku/engine.py b/hiku/engine.py index 549ebf9..4363e09 100644 --- a/hiku/engine.py +++ b/hiku/engine.py @@ -1005,7 +1005,10 @@ def __getitem__(self, item: Any) -> Any: ) -_ExecutorType = TypeVar("_ExecutorType", bound=SyncAsyncExecutor) +# Covariant must be used because we want to accept subclasses of Executor +_ExecutorType = TypeVar( + "_ExecutorType", covariant=True, bound=SyncAsyncExecutor +) class Engine(Generic[_ExecutorType]): diff --git a/hiku/error.py b/hiku/error.py new file mode 100644 index 0000000..4333c38 --- /dev/null +++ b/hiku/error.py @@ -0,0 +1,7 @@ +__all__ = ["GraphQLError"] + + +class GraphQLError(Exception): + def __init__(self, message: str) -> None: + super().__init__(message) + self.message = message diff --git a/hiku/extensions/base_extension.py b/hiku/extensions/base_extension.py index e4a0f04..5f18952 100644 --- a/hiku/extensions/base_extension.py +++ b/hiku/extensions/base_extension.py @@ -51,7 +51,7 @@ def on_operation( At this step the: - execution_context.query_src (if type str) is set to the query string - - execution_context.query (if type Noe) is set to the query Node + - execution_context.query (if type Node) is set to the query Node - execution_context.variables is set to the query variables - execution_context.operation_name is set to the query operation name - execution_context.query_graph is set to the query graph diff --git a/hiku/extensions/query_parse_cache.py b/hiku/extensions/query_parse_cache.py index bde8cb1..daec260 100644 --- a/hiku/extensions/query_parse_cache.py +++ b/hiku/extensions/query_parse_cache.py @@ -25,11 +25,12 @@ def __init__(self, maxsize: Optional[int] = None): self.cached_parser = lru_cache(maxsize=maxsize)(parse_query) def on_parse(self, execution_context: ExecutionContext) -> Iterator[None]: - execution_context.graphql_document = self.cached_parser( - execution_context.query_src, - ) 
- - info = self.cached_parser.cache_info() - QUERY_CACHE_HITS.set(info.hits) - QUERY_CACHE_MISSES.set(info.misses) + if execution_context.query_src: + execution_context.graphql_document = self.cached_parser( + execution_context.query_src, + ) + + info = self.cached_parser.cache_info() + QUERY_CACHE_HITS.set(info.hits) + QUERY_CACHE_MISSES.set(info.misses) yield diff --git a/hiku/federation/sdl.py b/hiku/federation/sdl.py index 65fc0f1..b6752f9 100644 --- a/hiku/federation/sdl.py +++ b/hiku/federation/sdl.py @@ -577,7 +577,6 @@ def skip(field: t.Union[Field, Link]) -> bool: def print_sdl( - # TODO: accept schema ??? graph: FederationGraph, mutation_graph: Optional[Graph] = None, federation_version: int = DEFAULT_FEDERATION_VERSION, diff --git a/hiku/schema.py b/hiku/schema.py index 22115e0..23bd8cf 100644 --- a/hiku/schema.py +++ b/hiku/schema.py @@ -20,6 +20,7 @@ ) from hiku.denormalize.graphql import DenormalizeGraphQL from hiku.engine import _ExecutorType, Engine +from hiku.error import GraphQLError from hiku.executors.base import ( BaseAsyncExecutor, BaseSyncExecutor, @@ -35,8 +36,8 @@ from hiku.validate.query import validate -class GraphQLError(Exception): - def __init__(self, *, errors: List[str]): +class ValidationError(Exception): + def __init__(self, errors: List[str]) -> None: super().__init__("{} errors".format(len(errors))) self.errors = errors @@ -56,7 +57,7 @@ def _run_validation( @dataclass class ExecutionResult: data: Optional[Dict[str, Any]] - error: Optional[GraphQLError] + errors: Optional[List[GraphQLError]] class Schema(Generic[_ExecutorType]): @@ -158,8 +159,12 @@ def execute_sync( ).process(execution_context.query) return ExecutionResult(data, None) + except ValidationError as e: + return ExecutionResult( + None, [GraphQLError(message) for message in e.errors] + ) except GraphQLError as e: - return ExecutionResult(None, e) + return ExecutionResult(None, [e]) async def execute( self: "Schema[BaseAsyncExecutor]", @@ -199,7 +204,6 @@ async def 
execute( ) with extensions_manager.execution(): - # result = self.engine.execute(execution_context) result = await self.engine.execute(execution_context) execution_context.result = result @@ -210,8 +214,12 @@ async def execute( ).process(execution_context.query) return ExecutionResult(data, None) + except ValidationError as e: + return ExecutionResult( + None, [GraphQLError(message) for message in e.errors] + ) except GraphQLError as e: - return ExecutionResult(None, e) + return ExecutionResult(None, [e]) def _validate( self, @@ -249,11 +257,7 @@ def _init_execution_context( execution_context.request_operation_name, ) except TypeError as e: - raise GraphQLError( - errors=[ - "Failed to read query: {}".format(e), - ] - ) + raise GraphQLError("Failed to read query: {}".format(e)) execution_context.query = execution_context.operation.query # save original query before merging to validate it @@ -266,7 +270,7 @@ def _init_execution_context( op = execution_context.operation if op.type not in (OperationType.QUERY, OperationType.MUTATION): raise GraphQLError( - errors=["Unsupported operation type: {!r}".format(op.type)] + "Unsupported operation type: {!r}".format(op.type) ) with extensions_manager.validation(): @@ -278,4 +282,4 @@ def _init_execution_context( ) if execution_context.errors: - raise GraphQLError(errors=execution_context.errors) + raise ValidationError(errors=execution_context.errors) diff --git a/tests/extensions/test_query_depth_validator.py b/tests/extensions/test_query_depth_validator.py index 3369bfc..ca149d3 100644 --- a/tests/extensions/test_query_depth_validator.py +++ b/tests/extensions/test_query_depth_validator.py @@ -51,4 +51,4 @@ def test_query_depth_validator(sync_graph): """ result = schema.execute_sync(query) - assert result.error.errors == ["Query depth 4 exceeds maximum allowed depth 2"] + assert [e.message for e in result.errors] == ["Query depth 4 exceeds maximum allowed depth 2"] diff --git a/tests/test_engine.py b/tests/test_engine.py 
index 614b626..8c27dd6 100644 --- a/tests/test_engine.py +++ b/tests/test_engine.py @@ -10,21 +10,29 @@ from sqlalchemy.pool import StaticPool from hiku import query as q -from hiku.builder import Q, build +from hiku.builder import Q, M, build from hiku.context import create_execution_context from hiku.denormalize.graphql import DenormalizeGraphQL from hiku.engine import Context, Engine, pass_context from hiku.executors.sync import SyncExecutor -from hiku.graph import (Field, Graph, Interface, Link, Node, Nothing, Option, - Root, Union) +from hiku.graph import Field, Graph, Interface, Link, Node, Nothing, Option, Root, Union from hiku.introspection.graphql import GraphQLIntrospection from hiku.merge import QueryMerger from hiku.readers.graphql import read from hiku.result import denormalize from hiku.schema import Schema from hiku.sources.sqlalchemy import FieldsQuery -from hiku.types import (Boolean, Integer, InterfaceRef, Optional, Record, Sequence, - String, TypeRef, UnionRef) +from hiku.types import ( + Boolean, + Integer, + InterfaceRef, + Optional, + Record, + Sequence, + String, + TypeRef, + UnionRef, +) from hiku.utils import ImmutableDict, listify from .base import ANY, Mock, check_result @@ -48,7 +56,9 @@ def id_field(fields, ids): def execute(graph, query_, ctx=None): engine = Engine(SyncExecutor()) - return engine.execute(create_execution_context(query=query_, query_graph=graph, context=ctx)) + return engine.execute( + create_execution_context(query=query_, query_graph=graph, context=ctx) + ) def execute_schema(graph, query): @@ -218,7 +228,7 @@ def test_links_requires_list(): link_song = Mock(return_value=100) def link_song_info( - reqs: List[ImmutableDict[str, Any]] + reqs: List[ImmutableDict[str, Any]], ) -> List[ImmutableDict[str, Any]]: return reqs @@ -294,7 +304,7 @@ def get_fields(require): Q.infoV2[ Q.album_name, Q.artist_name, - ] + ], ] ] ) @@ -481,9 +491,7 @@ def test_field_option_valid(option, args, result): ), ] ) - check_result( - 
execute(graph, build([Q.auslese(**args)])), {"auslese": "baking"} - ) + check_result(execute(graph, build([Q.auslese(**args)])), {"auslese": "baking"}) f.assert_called_once_with([q.Field("auslese", options=result)]) @@ -498,9 +506,7 @@ def test_field_option_missing(): [ Root( [ - Field( - "poofy", None, Mock(), options=[Option("mohism", None)] - ), + Field("poofy", None, Mock(), options=[Option("mohism", None)]), ] ), ] @@ -508,8 +514,7 @@ def test_field_option_missing(): with pytest.raises(TypeError) as err: execute(graph, build([Q.poofy])) err.match( - r'^Required option "mohism" for Field\(\'poofy\', ' - r"(.*) was not provided$" + r'^Required option "mohism" for Field\(\'poofy\', ' r"(.*) was not provided$" ) @@ -538,9 +543,7 @@ def test_link_option_valid(option, args, result): ), ] ) - check_result( - execute(graph, build([Q.b(**args)[Q.c]])), {"b": [{"c": "aunder"}]} - ) + check_result(execute(graph, build([Q.b(**args)[Q.c]])), {"b": [{"c": "aunder"}]}) f1.assert_called_once_with(result) f2.assert_called_once_with([q.Field("c")], [1]) @@ -576,8 +579,7 @@ def test_link_option_missing(): with pytest.raises(TypeError) as err: execute(graph, build([Q.eclairs[Q.papeete]])) err.match( - r'^Required option "nocks" for Link\(\'eclairs\', ' - r"(.*) was not provided$" + r'^Required option "nocks" for Link\(\'eclairs\', ' r"(.*) was not provided$" ) @@ -594,9 +596,7 @@ def test_pass_context_field(): ] ) - check_result( - execute(graph, build([Q.a]), {"vetch": "shadier"}), {"a": "boiardo"} - ) + check_result(execute(graph, build([Q.a]), {"vetch": "shadier"}), {"a": "boiardo"}) f.assert_called_once_with(ANY, [q.Field("a")]) @@ -700,9 +700,7 @@ def test_root_field_func_result_validation(value): ) -@pytest.mark.parametrize( - "value", [1, [], [1, 2], [[], []], [[1], []], [[], [2]]] -) +@pytest.mark.parametrize("value", [1, [], [1, 2], [[], []], [[1], []], [[], [2]]]) def test_node_field_func_result_validation(value): with pytest.raises(TypeError) as err: execute( @@ -777,9 
+775,7 @@ def test_node_link_one_func_result_validation(value): execute( Graph( [ - Node( - "a", [Field("b", None, Mock(return_value=[[1], [2]]))] - ), + Node("a", [Field("b", None, Mock(return_value=[[1], [2]]))]), Node( "c", [ @@ -820,9 +816,7 @@ def test_node_link_many_func_result_validation(value): execute( Graph( [ - Node( - "a", [Field("b", None, Mock(return_value=[[1], [2]]))] - ), + Node("a", [Field("b", None, Mock(return_value=[[1], [2]]))]), Node( "c", [ @@ -1042,10 +1036,7 @@ def test_conflicting_fields(): @listify def x_fields(fields, ids): for i in ids: - yield [ - "{}-{}".format(x_data[i][f.name], f.options["k"]) - for f in fields - ] + yield ["{}-{}".format(x_data[i][f.name], f.options["k"]) for f in fields] graph = Graph( [ @@ -1382,6 +1373,41 @@ def link_b2(ids): } +def test_mutation_query_builder(): + def a_fields(fields, ids): + def get_fields(f, id_): + assert id_ is not None and id_ is not Nothing + if f.name == "a": + return 42 + raise AssertionError("Unexpected field: {}".format(f)) + + return [[get_fields(f, id_) for f in fields] for id_ in ids] + + graph = Graph( + [ + Node( + "A", + [ + Field("a", String, a_fields), + ], + ), + ] + ) + + mutation = Graph( + graph.nodes + + [ + Root([Link("createA", TypeRef["A"], lambda: 1, requires=None)]), + ] + ) + + query = build([M.createA[Q.a]]) + schema = Schema(SyncExecutor(), graph, mutation=mutation) + result = schema.execute_sync(query) + + assert result.data == {"createA": {"a": 42}} + + def test_merge_query__fragments(): num_link_user = 0 num_resolve_id = 0 @@ -1418,9 +1445,7 @@ def link_user(): "Context", [Link("user", TypeRef["User"], link_user, requires=None)], ), - Root( - [Link("context", TypeRef["Context"], lambda: 1, requires=None)] - ), + Root([Link("context", TypeRef["Context"], lambda: 1, requires=None)]), ] ) @@ -1443,9 +1468,11 @@ def link_user(): assert data == {"context": {"user": {"id": 1, "name": "John"}}} -@pytest.mark.parametrize("query", [ - 
pytest.param( - """ +@pytest.mark.parametrize( + "query", + [ + pytest.param( + """ query GetUser2 { context { user { @@ -1461,10 +1488,10 @@ def link_user(): } } """, - id="one level fragments" - ), - pytest.param( - """ + id="one level fragments", + ), + pytest.param( + """ query GetUser { context { ...ContextFragment @@ -1483,10 +1510,10 @@ def link_user(): } } """, - id="nested fragments", - ), - pytest.param( - """ + id="nested fragments", + ), + pytest.param( + """ query GetUser { context { ... on MyContext { @@ -1507,9 +1534,10 @@ def link_user(): } } """, - id="nested + neighbour fragments", - ), -]) + id="nested + neighbour fragments", + ), + ], +) def test_merge_query__interface_fragments(query): num_link_user = 0 num_resolve_id = 0 @@ -1582,14 +1610,14 @@ def link_user(): assert num_link_user == 1 assert num_resolve_id == 1 assert num_resolve_name == 1 - assert data == { - "context": {"user": {"id": 1, "name": "John"}, "balance": 100} - } + assert data == {"context": {"user": {"id": 1, "name": "John"}, "balance": 100}} -@pytest.mark.parametrize("query", [ - pytest.param( - """ +@pytest.mark.parametrize( + "query", + [ + pytest.param( + """ query GetUser { contexts { ... on BaseContext { user { name } } @@ -1598,9 +1626,10 @@ def link_user(): } } """, - id="one level fragments"), - pytest.param( - """ + id="one level fragments", + ), + pytest.param( + """ query GetUser { contexts { ...ContextsFragment @@ -1612,11 +1641,10 @@ def link_user(): ... on MyContext { balance } } """, - id="nested fragments", - - ), - pytest.param( - """ + id="nested fragments", + ), + pytest.param( + """ query GetUser { contexts { ... on MyContext { balance } @@ -1628,9 +1656,10 @@ def link_user(): ... 
on MyContext { user { id name } } } """, - id="nested + neighbour fragments", - ), -]) + id="nested + neighbour fragments", + ), + ], +) def test_merge_query__union_fragments(query): num_link_user = 0 num_resolve_id = 0 @@ -1667,9 +1696,7 @@ def link_user(ids): "MyContext", [ Field("user_id", Integer, lambda fields, ids: [ids]), - Link( - "user", TypeRef["User"], link_user, requires="user_id" - ), + Link("user", TypeRef["User"], link_user, requires="user_id"), Field("balance", Integer, lambda fields, ids: [[100]]), ], ), @@ -1677,9 +1704,7 @@ def link_user(ids): "BaseContext", [ Field("user_id", Integer, lambda fields, ids: [ids]), - Link( - "user", TypeRef["User"], link_user, requires="user_id" - ), + Link("user", TypeRef["User"], link_user, requires="user_id"), ], ), Root( @@ -1722,7 +1747,7 @@ def _count_calls(func): def wrapper(*args, **kwargs): if func.__name__.startswith("resolve"): for field in args[0]: - key = f'{func.__name__}:{field.name}' + key = f"{func.__name__}:{field.name}" call_count[key] += 1 else: call_count[func.__name__] += 1 @@ -1766,10 +1791,15 @@ def link_info() -> int: "User", [ Field("id", String, resolve_user), - Field("name", String, resolve_user, options=[ - Option("capitalize", Optional[Boolean], default=False) - ]), - Link("info", TypeRef["Info"], link_info, requires=None) + Field( + "name", + String, + resolve_user, + options=[ + Option("capitalize", Optional[Boolean], default=False) + ], + ), + Link("info", TypeRef["Info"], link_info, requires=None), ], ), Node( @@ -1781,13 +1811,9 @@ def link_info() -> int: ), Node( "Context", - [ - Link("user", TypeRef["User"], link_user, requires=None) - ], - ), - Root( - [Link("context", TypeRef["Context"], lambda: 100, requires=None)] + [Link("user", TypeRef["User"], link_user, requires=None)], ), + Root([Link("context", TypeRef["Context"], lambda: 100, requires=None)]), ] ) @@ -1852,7 +1878,7 @@ def link_info() -> int: "id": 1, "name": "john", "capName": "John", - "info": {"email": 
"john@example.com", "phone": "+1234567890"} + "info": {"email": "john@example.com", "phone": "+1234567890"}, } } } @@ -1876,20 +1902,21 @@ def get_field(f, id_) -> Any: "User", [ Field("id", String, resolve_user), - Field("name", String, resolve_user, options=[ - Option("capitalize", Optional[Boolean], default=False) - ]), + Field( + "name", + String, + resolve_user, + options=[ + Option("capitalize", Optional[Boolean], default=False) + ], + ), ], ), Node( "Context", - [ - Link("user", TypeRef["User"], lambda: 1, requires=None) - ], - ), - Root( - [Link("context", TypeRef["Context"], lambda: 100, requires=None)] + [Link("user", TypeRef["User"], lambda: 1, requires=None)], ), + Root([Link("context", TypeRef["Context"], lambda: 100, requires=None)]), ] ) @@ -1935,21 +1962,30 @@ def get_field(f, id_) -> Any: def test_merge_query__complex_field_fragment() -> None: def point_func(fields): - return [{ - "x": 1, - "y": 2, - }] - - graph = Graph([ - Root([ - Field('point', TypeRef["Point"], point_func), - ]), - ], data_types={ - 'Point': Record[{ - 'x': Integer, - 'y': Integer, - }], - }) + return [ + { + "x": 1, + "y": 2, + } + ] + + graph = Graph( + [ + Root( + [ + Field("point", TypeRef["Point"], point_func), + ] + ), + ], + data_types={ + "Point": Record[ + { + "x": Integer, + "y": Integer, + } + ], + }, + ) query = """ query GetPoint { @@ -1988,7 +2024,5 @@ def test_denormalize_introspection() -> str: query = QueryMerger(graph).merge(query) result = execute(graph, query) - data = denormalize( - graph, result - ) + data = denormalize(graph, result) assert data is not None diff --git a/tests/test_interface.py b/tests/test_interface.py index 0cb367c..827b88b 100644 --- a/tests/test_interface.py +++ b/tests/test_interface.py @@ -179,7 +179,7 @@ def test_option_not_provided_for_field(): """ result = execute(GRAPH, read(query)) - assert result.error.errors == [ + assert [e.message for e in result.errors] == [ 'Required option "Video.thumbnailUrl:size" is not specified' ] @@ 
-423,7 +423,7 @@ def test_validate_interface_has_no_implementations(): result = execute(graph, read(query)) - assert result.error.errors == [ + assert [e.message for e in result.errors] == [ "Can not query field 'id' on interface 'Media'. " "Interface 'Media' is not implemented by any type. " "Add at least one type implementing this interface.", @@ -447,7 +447,7 @@ def test_validate_query_implementation_node_field_without_inline_fragment(): result = execute(GRAPH, read(query)) - assert result.error.errors == [ + assert [e.message for e in result.errors] == [ "Can not query field 'album' on type 'Media'. " "Did you mean to use an inline fragment on 'Audio'?" ] @@ -465,7 +465,7 @@ def test_validate_query_fragment_no_type_condition(): result = execute(GRAPH, read(query, {'text': 'foo'})) - assert result.error.errors == [ + assert [e.message for e in result.errors] == [ "Can not query field 'album' on type 'Media'. " "Did you mean to use an inline fragment on 'Audio'?" ] @@ -484,7 +484,7 @@ def test_validate_query_fragment_on_unknown_type(): result = execute(GRAPH, read(query, {'text': 'foo'})) - assert result.error.errors == ["Fragment on unknown type 'X'"] + assert [e.message for e in result.errors] == ["Fragment on unknown type 'X'"] def test_validate_interface_type_has_no_such_field(): @@ -504,7 +504,7 @@ def test_validate_interface_type_has_no_such_field(): result = execute(GRAPH, read(query, {'text': 'foo'})) - assert result.error.errors == [ + assert [e.message for e in result.errors] == [ 'Field "invalid_field" is not implemented in the "Audio" node', ] @@ -525,6 +525,6 @@ def test_validate_interface_type_field_has_no_such_option(): result = execute(GRAPH, read(query, {'text': 'foo'})) - assert result.error.errors == [ + assert [e.message for e in result.errors] == [ 'Unknown options for "Audio.duration": size', ] diff --git a/tests/test_union.py b/tests/test_union.py index f01ace2..faa4d07 100644 --- a/tests/test_union.py +++ b/tests/test_union.py @@ -168,7 
+168,7 @@ def test_option_not_provided_for_field(): } """ result = execute(read(query)) - result.error.errors == [ + assert [e.message for e in result.errors] == [ "Required option \"size\" for Field('thumbnailUrl'" ]