Sketching out a potential new architecture for pygls #418

Closed · wants to merge 17 commits · Changes from 1 commit

refactor: wip unit tests
alcarney committed Apr 4, 2024

commit a3f868d61a472eec500879b6a12dc0ebc148f6f9
600 changes: 600 additions & 0 deletions tests/lsp/test_capabilities.py

Large diffs are not rendered by default.

118 changes: 118 additions & 0 deletions tests/lsp/test_protocol.py
@@ -0,0 +1,118 @@
import json

import pytest
from lsprotocol import types

from pygls.lsp.protocol import LanguageServerProtocol
from pygls.lsp.protocol import default_converter


@pytest.mark.parametrize(
"method, params, expected",
[
(
types.TEXT_DOCUMENT_COMPLETION,
types.CompletionParams(
text_document=types.TextDocumentIdentifier(uri="file:///file.txt"),
position=types.Position(line=1, character=0),
),
{
"jsonrpc": "2.0",
"id": "1",
"method": types.TEXT_DOCUMENT_COMPLETION,
"params": {
"textDocument": {"uri": "file:///file.txt"},
"position": {"line": 1, "character": 0},
},
},
),
],
)
def test_encode_request(method, params, expected):
"""Ensure that we can encode request messages."""

protocol = LanguageServerProtocol(default_converter)
data = protocol.encode_request(method, params, msg_id="1", include_headers=False)
actual = json.loads(data.decode("utf8"))

assert actual == expected


@pytest.mark.parametrize(
"msg_type, result, expected",
[
(types.ShutdownResponse, None, {"jsonrpc": "2.0", "id": "1", "result": None}),
(
types.TextDocumentCompletionResponse,
[
types.CompletionItem(label="example-one"),
types.CompletionItem(
label="example-two",
kind=types.CompletionItemKind.Class,
preselect=False,
deprecated=True,
),
],
{
"jsonrpc": "2.0",
"id": "1",
"result": [
{"label": "example-one"},
{
"label": "example-two",
"kind": 7, # CompletionItemKind.Class
"preselect": False,
"deprecated": True,
},
],
},
),
],
)
def test_encode_response(msg_type, result, expected):
"""Ensure that we can serialize response messages"""

protocol = LanguageServerProtocol(default_converter)
protocol._result_types["1"] = msg_type

data = protocol.encode_result("1", result=result, include_headers=False)
actual = json.loads(data.decode("utf8"))

assert actual == expected


@pytest.mark.parametrize(
"method, params, expected",
[
(
types.PROGRESS,
types.ProgressParams(
token="id1",
value=types.WorkDoneProgressBegin(
title="Begin progress",
percentage=0,
),
),
{
"jsonrpc": "2.0",
"method": "$/progress",
"params": {
"token": "id1",
"value": {
"kind": "begin",
"percentage": 0,
"title": "Begin progress",
},
},
},
),
],
)
def test_encode_notification(method, params, expected):
"""Ensure that we can encode notification messages"""

protocol = LanguageServerProtocol(default_converter)
data = protocol.encode_notification(method, params=params, include_headers=False)
actual = json.loads(data.decode("utf8"))

assert actual == expected
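
Note: the tests above call the encode_* helpers with include_headers=False and compare the decoded JSON directly. On the wire, each message is additionally framed with a Content-Length header, which the stub servers further down build by hand. A minimal, purely illustrative sketch of that framing (frame_message is not part of pygls):

import json


def frame_message(payload: dict) -> bytes:
    """Add the Content-Length framing used on the wire; the stub servers in
    tests/servers/ construct the same framing by hand."""
    body = json.dumps(payload).encode("utf8")
    return f"Content-Length: {len(body)}\r\n\r\n".encode("utf8") + body


# e.g. the shutdown response from the parametrized cases above:
print(frame_message({"jsonrpc": "2.0", "id": "1", "result": None}))
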
File renamed without changes.
24 changes: 12 additions & 12 deletions tests/servers/invalid_json.py
@@ -1,25 +1,25 @@
 """This server does nothing but print invalid JSON."""
 import asyncio
-import threading
 import sys
-from concurrent.futures import ThreadPoolExecutor
+import threading
+from functools import partial
 
-from pygls.server import aio_readline
+from pygls.protocol import rpc_main_loop
+from pygls.server._async_server import get_sdtio_streams
 
 
-def handler(data):
+def write_data(writer: asyncio.StreamWriter, data):
     content = 'Content-Length: 5\r\n\r\n{"ll}'.encode("utf8")
-    sys.stdout.buffer.write(content)
-    sys.stdout.flush()
+    writer.write(content)
 
 
 async def main():
-    await aio_readline(
-        asyncio.get_running_loop(),
-        ThreadPoolExecutor(),
-        threading.Event(),
-        sys.stdin.buffer,
-        handler,
+    reader, writer = await get_sdtio_streams(sys.stdin.buffer, sys.stdout.buffer)
+
+    await rpc_main_loop(
+        reader=reader,
+        stop_event=threading.Event(),
+        message_handler=partial(write_data, writer),
     )


24 changes: 12 additions & 12 deletions tests/servers/large_response.py
@@ -1,13 +1,14 @@
 """This server returns a particuarly large response."""
 import asyncio
-import threading
 import sys
-from concurrent.futures import ThreadPoolExecutor
+import threading
+from functools import partial
 
-from pygls.server import aio_readline
+from pygls.protocol import rpc_main_loop
+from pygls.server._async_server import get_sdtio_streams
 
 
-def handler(data):
+def write_data(writer, data):
     payload = dict(
         jsonrpc="2.0",
         id=1,
@@ -18,17 +19,16 @@ def handler(data):
     content = str(payload).replace("'", '"')
     message = f"Content-Length: {len(content)}\r\n\r\n{content}".encode("utf8")
 
-    sys.stdout.buffer.write(message)
-    sys.stdout.flush()
+    writer.write(message)
 
 
 async def main():
-    await aio_readline(
-        asyncio.get_running_loop(),
-        ThreadPoolExecutor(),
-        threading.Event(),
-        sys.stdin.buffer,
-        handler,
+    reader, writer = await get_sdtio_streams(sys.stdin.buffer, sys.stdout.buffer)
+
+    await rpc_main_loop(
+        reader=reader,
+        stop_event=threading.Event(),
+        message_handler=partial(write_data, writer),
     )


21 changes: 21 additions & 0 deletions tests/servers/rpc.py
@@ -0,0 +1,21 @@
"""A generic JSON-RPC server"""
import asyncio
import logging
from typing import Dict

from pygls.server import JsonRPCServer

server = JsonRPCServer()


@server.feature("math/add")
def add(params: Dict[str, float]):
a = params["a"]
b = params["b"]

return dict(sum=a + b)


if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, filename="server.log", filemode="w")
asyncio.run(server.start_io())
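
Note: as a rough illustration of how this server would be driven, here is a hypothetical client-side script. It uses only the JsonRPCClient calls that appear in tests/test_client.py below (start_io, send_request, stop), and assumes the handler's dict(sum=a + b) return value arrives as a plain dict, as the large-response test suggests; it is a sketch, not part of this commit.

import asyncio
import pathlib
import sys

from pygls.client import JsonRPCClient

SERVERS = pathlib.Path(__file__).parent / "servers"


async def main():
    client = JsonRPCClient()
    # Launch tests/servers/rpc.py as a subprocess and talk to it over stdio.
    await client.start_io(sys.executable, str(SERVERS / "rpc.py"))

    # The math/add handler above returns dict(sum=a + b).
    result = await client.send_request("math/add", {"a": 1, "b": 2})
    assert result["sum"] == 3

    await client.stop()


if __name__ == "__main__":
    asyncio.run(main())
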
39 changes: 12 additions & 27 deletions tests/test_client.py
@@ -17,20 +17,15 @@
 import asyncio
 import pathlib
 import sys
-from typing import Union
 
 import pytest
-from pygls import IS_PYODIDE
-
 from pygls.client import JsonRPCClient
-from pygls.exceptions import JsonRpcException, PyglsError
 
 
 SERVERS = pathlib.Path(__file__).parent / "servers"
 
 
 @pytest.mark.asyncio
-@pytest.mark.skipif(IS_PYODIDE, reason="Subprocesses are not available on pyodide.")
 async def test_client_detect_server_exit():
     """Ensure that the client detects when the server process exits."""
 
@@ -51,7 +46,6 @@ async def server_exit(self, server: asyncio.subprocess.Process):
 
 
 @pytest.mark.asyncio
-@pytest.mark.skipif(IS_PYODIDE, reason="Subprocesses are not available on pyodide.")
 async def test_client_detect_invalid_json():
     """Ensure that the client can detect the case where the server returns invalid
     json."""
@@ -60,43 +54,34 @@ class TestClient(JsonRPCClient):
         report_error_called = False
-        future = None
 
-        def report_server_error(
-            self, error: Exception, source: Union[PyglsError, JsonRpcException]
-        ):
+        def error_handler(self, error: Exception):
             self.report_error_called = True
-            self.future.cancel()
+
+            self._server.kill()
+            self._stop_event.set()
 
+            assert "Unterminated string" in str(error)
 
     client = TestClient()
     await client.start_io(sys.executable, str(SERVERS / "invalid_json.py"))
 
-    future = client.protocol.send_request_async("method/name", {})
-    client.future = future
-
+    cancelled = False
     try:
-        await future
-    except asyncio.CancelledError:
-        pass  # Ignore the exception generated by cancelling the future
-    finally:
-        await client.stop()
+        await asyncio.wait_for(
+            client.send_request("method/name", {}), timeout=5.0  # seconds
+        )
+    except asyncio.TimeoutError:
+        assert_message = "Expected `error_handler` to have been called"
+        assert client.report_error_called, assert_message
+        cancelled = True
 
-    assert_message = "Expected `report_server_error` to have been called"
-    assert client.report_error_called, assert_message
+    assert cancelled is True, "Expected request to have been cancelled."
 
 
 @pytest.mark.asyncio
-@pytest.mark.skipif(IS_PYODIDE, reason="Subprocesses are not available on pyodide.")
 async def test_client_large_responses():
     """Ensure that the client can correctly handle large responses from a server."""
 
     client = JsonRPCClient()
     await client.start_io(sys.executable, str(SERVERS / "large_response.py"))
 
-    result = await client.protocol.send_request_async("get/numbers", {}, msg_id=1)
-    assert len(result.numbers) == 100_000
+    result = await client.send_request("get/numbers", {}, msg_id=1)
+    assert len(result["numbers"]) == 100_000
 
     await client.stop()