From 2099ee2a8545cc85340ed457fba95bfc5db4bdc6 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 20 Jun 2024 12:36:18 +0200 Subject: [PATCH 01/38] treewide: replace autobahn and crossbar with gRPC Crossbar and autobahn are used to communicate between client and coordinator as well as between exporter and coordinator. Both are unfortunately not very well maintained anymore. The crossbar component was moved to its own virtualenv for quite a while to cope with conflicting dependencies. python3.13 support for crossbar is still not available (at least not in a version released on PyPI). That's why labgrid will now move to gRPC. It's a well maintained RPC framework. As a side effect, the message transfer is more performant and the import times are shorter. gRPC relies on the protocol buffers compiler (protoc) to generate code. Use grpcio-tools 1.62.2 to do that. It's used in Yocto scarthgap and is incompatible with generated code from newer grpcio-tools. Co-developed-by: Rouven Czerwinski Co-developed-by: Bastian Krause Signed-off-by: Jan Luebbe --- .crossbar/.gitignore | 3 - .crossbar/config-anonymous.yaml | 47 - .github/workflows/reusable-unit-tests.yml | 8 +- contrib/completion/labgrid-client.bash | 6 +- crossbar-requirements.txt | 2 - doc/conf.py | 11 - labgrid/config.py | 4 + labgrid/pytestplugin/fixtures.py | 4 +- labgrid/pytestplugin/hooks.py | 2 +- labgrid/remote/client.py | 563 ++++++----- labgrid/remote/common.py | 242 ++++- labgrid/remote/coordinator.py | 873 ++++++++++-------- labgrid/remote/exporter.py | 274 +++--- labgrid/remote/generated/generate-proto.sh | 4 + .../generated/labgrid_coordinator_pb2.py | 158 ++++ .../generated/labgrid_coordinator_pb2.pyi | 448 +++++++++ .../generated/labgrid_coordinator_pb2_grpc.py | 627 +++++++++++++ labgrid/remote/generated/requirements.in | 3 + labgrid/remote/generated/requirements.txt | 15 + .../remote/generated/update-requirements.sh | 5 + .../remote/proto/labgrid-coordinator.proto | 297 ++++++ 
labgrid/resource/remote.py | 9 +- labgrid/util/proxy.py | 7 + pyproject.toml | 11 +- tests/conftest.py | 107 +-- tests/{test_crossbar.py => test_client.py} | 132 ++- tests/test_coordinator.py | 167 ++++ tests/test_fixtures.py | 4 +- tests/test_pb2.py | 172 ++++ tests/test_remote.py | 3 - 30 files changed, 3262 insertions(+), 946 deletions(-) delete mode 100644 .crossbar/.gitignore delete mode 100644 .crossbar/config-anonymous.yaml delete mode 100644 crossbar-requirements.txt create mode 100755 labgrid/remote/generated/generate-proto.sh create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2.py create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2.pyi create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py create mode 100644 labgrid/remote/generated/requirements.in create mode 100644 labgrid/remote/generated/requirements.txt create mode 100755 labgrid/remote/generated/update-requirements.sh create mode 100644 labgrid/remote/proto/labgrid-coordinator.proto rename tests/{test_crossbar.py => test_client.py} (80%) create mode 100644 tests/test_coordinator.py create mode 100644 tests/test_pb2.py diff --git a/.crossbar/.gitignore b/.crossbar/.gitignore deleted file mode 100644 index a6c031384..000000000 --- a/.crossbar/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -key.priv -key.pub -node.pid diff --git a/.crossbar/config-anonymous.yaml b/.crossbar/config-anonymous.yaml deleted file mode 100644 index 8771a5aa1..000000000 --- a/.crossbar/config-anonymous.yaml +++ /dev/null @@ -1,47 +0,0 @@ -version: 2 -workers: -- type: router - realms: - - name: realm1 - roles: - - name: public - permissions: - - uri: '' - match: prefix - allow: - call: true - register: true - publish: true - subscribe: true - disclose: - caller: true - publisher: true - cache: true - transports: - - type: web - endpoint: - type: tcp - port: 20408 - paths: - /: - type: static - directory: ../web - ws: - type: websocket - options: - auto_fragment_size: 65536 - auth: 
- anonymous: - type: static - role: public -- id: coordinator - type: guest - executable: /path/to/labgrid-venv/bin/python3 - arguments: - - -m - - labgrid.remote.coordinator - options: - workdir: . - env: - vars: - WS: ws://localhost:20408/ws diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 3bf401292..15be56078 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -30,7 +30,7 @@ jobs: ${{ runner.os }}-pip- - name: Install system dependencies run: | - sudo apt-get install -yq libow-dev openssh-server openssh-client libsnappy-dev graphviz openocd + sudo apt-get install -yq libow-dev openssh-server openssh-client graphviz openocd sudo mkdir -p /var/cache/labgrid/runner && sudo chown runner /var/cache/labgrid/runner - name: Prepare local SSH run: | @@ -46,17 +46,13 @@ jobs: - name: Install labgrid run: | pip install -e ".[dev]" - - name: Install crossbar in virtualenv - run: | - virtualenv -p python3 crossbar-venv - crossbar-venv/bin/pip install -r crossbar-requirements.txt - name: Lint with pylint run: | pylint --list-msgs-enabled pylint labgrid - name: Test with pytest run: | - pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner --crossbar-venv crossbar-venv -k "not test_docker_with_daemon" + pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" - name: Build documentation run: | make -C doc clean diff --git a/contrib/completion/labgrid-client.bash b/contrib/completion/labgrid-client.bash index 7bc0d8499..81b6883c0 100644 --- a/contrib/completion/labgrid-client.bash +++ b/contrib/completion/labgrid-client.bash @@ -2,14 +2,14 @@ # options top level and subcommands support _labgrid_shared_options="--help" -_labgrid_main_opts_with_value="@(-x|--crossbar|-c|--config|-p|--place|-s|--state|-i|--initial-state|-P|--proxy)" 
+_labgrid_main_opts_with_value="@(-x|--coordinator|-c|--config|-p|--place|-s|--state|-i|--initial-state|-P|--proxy)" # Parses labgrid-client arguments # Sets arg to subcommand, excluding options and their values. # Sets last_arg_opt_with_value to true if the last argument is an option requiring a value, else # false. # Sets base_cmd to the labgrid-client base command up to subcommand and removes trailing -# option requiring a value - useful to call 'labgrid-client complete' with place/crossbar/proxy set +# option requiring a value - useful to call 'labgrid-client complete' with place/coordinator/proxy set # Before calling this function, make sure arg, base_cmd and last_arg_opt_with_value are local _labgrid_parse_args() { @@ -867,7 +867,7 @@ _labgrid_client() case "$cur" in --*) # top level args completion - local options="--crossbar \ + local options="--coordinator \ --config \ --place \ --state \ diff --git a/crossbar-requirements.txt b/crossbar-requirements.txt deleted file mode 100644 index d361d83d9..000000000 --- a/crossbar-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -crossbar==21.3.1 -autobahn<=22.4.1 diff --git a/doc/conf.py b/doc/conf.py index 139f530f0..cfef4259b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -182,17 +182,6 @@ 'special-members': True, } autodoc_mock_imports = ['onewire', - 'txaio', - 'autobahn', - 'autobahn.asyncio', - 'autobahn.asyncio.wamp', - 'autobahn.asyncio.websocket', - 'autobahn.wamp', - 'autobahn.wamp.types', - 'autobahn.twisted', - 'autobahn.twisted.wamp', - 'autobahn.wamp.exception', - 'twisted.internet.defer', 'gi', 'gi.repository',] diff --git a/labgrid/config.py b/labgrid/config.py index 7801e05e1..9b09dcb5e 100644 --- a/labgrid/config.py +++ b/labgrid/config.py @@ -4,6 +4,7 @@ """ import os +import warnings from yaml import YAMLError import attr @@ -50,6 +51,9 @@ def __attrs_post_init__(self): f"configuration file '{self.filename}' is invalid: {e}" ) + if self.get_option("crossbar_url", ""): + warnings.warn("Ignored option 
'crossbar_url' in config, use 'coordinator_address' instead", UserWarning) + def resolve_path(self, path): """Resolve an absolute path diff --git a/labgrid/pytestplugin/fixtures.py b/labgrid/pytestplugin/fixtures.py index 1bd8d2f2b..f881377c3 100644 --- a/labgrid/pytestplugin/fixtures.py +++ b/labgrid/pytestplugin/fixtures.py @@ -28,8 +28,8 @@ def pytest_addoption(parser): '--lg-coordinator', action='store', dest='lg_coordinator', - metavar='CROSSBAR_URL', - help='labgrid coordinator websocket URL.') + metavar='COORDINATOR_ADDRESS', + help='labgrid coordinator HOST[:PORT].') group.addoption( '--lg-log', action='store', diff --git a/labgrid/pytestplugin/hooks.py b/labgrid/pytestplugin/hooks.py index a701e5ccb..f69507250 100644 --- a/labgrid/pytestplugin/hooks.py +++ b/labgrid/pytestplugin/hooks.py @@ -91,7 +91,7 @@ def pytest_configure(config): if lg_env is not None: env = Environment(config_file=lg_env) if lg_coordinator is not None: - env.config.set_option('crossbar_url', lg_coordinator) + env.config.set_option('coordinator_address', lg_coordinator) config.stash[LABGRID_ENV_KEY] = env processwrapper.enable_logging() diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index f5842c541..a78759fb7 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -15,16 +15,16 @@ import shlex import shutil import json +import itertools from textwrap import indent from socket import gethostname from getpass import getuser from collections import defaultdict, OrderedDict from datetime import datetime from pprint import pformat -import txaio -txaio.use_asyncio() -from autobahn.asyncio.wamp import ApplicationSession +import attr +import grpc from .common import ( ResourceEntry, @@ -34,21 +34,18 @@ ReservationState, TAG_KEY, TAG_VAL, - enable_tcp_nodelay, - monkey_patch_max_msg_payload_size_ws_option, + queue_as_aiter, ) from .. 
import Environment, Target, target_factory from ..exceptions import NoDriverFoundError, NoResourceFoundError, InvalidConfigError +from .generated import labgrid_coordinator_pb2, labgrid_coordinator_pb2_grpc from ..resource.remote import RemotePlaceManager, RemotePlace -from ..util import diff_dict, flat_dict, filter_dict, dump, atomic_replace, labgrid_version, Timeout +from ..util import diff_dict, flat_dict, dump, atomic_replace, labgrid_version, Timeout from ..util.proxy import proxymanager from ..util.helper import processwrapper from ..driver import Mode, ExecutionError from ..logging import basicConfig, StepLogger -txaio.config.loop = asyncio.get_event_loop() # pylint: disable=no-member -monkey_patch_max_msg_payload_size_ws_option() - class Error(Exception): pass @@ -66,57 +63,148 @@ class InteractiveCommandError(Error): pass -class ClientSession(ApplicationSession): - """The ClientSession encapsulates all the actions a Client can Invoke on +@attr.s(eq=False) +class ClientSession: + """The ClientSession encapsulates all the actions a Client can invoke on the coordinator.""" + address = attr.ib(validator=attr.validators.instance_of(str)) + loop = attr.ib(validator=attr.validators.instance_of(asyncio.BaseEventLoop)) + env = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Environment))) + role = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) + prog = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) + args = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(argparse.Namespace))) + monitor = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + def gethostname(self): return os.environ.get("LG_HOSTNAME", gethostname()) def getuser(self): return os.environ.get("LG_USERNAME", getuser()) - def onConnect(self): + def __attrs_post_init__(self): """Actions which are executed if a connection is successfully 
opened.""" - self.loop = self.config.extra["loop"] - self.connected = self.config.extra["connected"] - self.args = self.config.extra.get("args") - self.env = self.config.extra.get("env", None) - self.role = self.config.extra.get("role", None) - self.prog = self.config.extra.get("prog", os.path.basename(sys.argv[0])) - self.monitor = self.config.extra.get("monitor", False) - enable_tcp_nodelay(self) - self.join( - self.config.realm, - authmethods=["anonymous", "ticket"], - authid=f"client/{self.gethostname()}/{self.getuser()}", - authextra={"authid": f"client/{self.gethostname()}/{self.getuser()}"}, - ) + self.stopping = asyncio.Event() - def onChallenge(self, challenge): - import warnings + self.channel = grpc.aio.insecure_channel(self.address) + self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) - warnings.warn("Ticket authentication is deprecated. Please update your coordinator.", DeprecationWarning) - logging.warning("Ticket authentication is deprecated. Please update your coordinator.") - return "dummy-ticket" + self.out_queue = asyncio.Queue() + self.stream_call = None + self.pump_task = None + self.sync_id = itertools.count(start=1) + self.sync_events = {} - async def onJoin(self, details): - # FIXME race condition? 
- resources = await self.call("org.labgrid.coordinator.get_resources") + async def start(self): + """Starts receiving resource and place updates from the coordinator.""" self.resources = {} - for exporter, groups in resources.items(): - for group_name, group in sorted(groups.items()): - for resource_name, resource in sorted(group.items()): - await self.on_resource_changed(exporter, group_name, resource_name, resource) - - places = await self.call("org.labgrid.coordinator.get_places") self.places = {} - for placename, config in places.items(): - await self.on_place_changed(placename, config) - await self.subscribe(self.on_resource_changed, "org.labgrid.coordinator.resource_changed") - await self.subscribe(self.on_place_changed, "org.labgrid.coordinator.place_changed") - await self.connected(self) + self.pump_task = self.loop.create_task(self.message_pump()) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.startup.version = labgrid_version() + msg.startup.name = f"{self.gethostname()}/{self.getuser()}" + self.out_queue.put_nowait(msg) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.subscribe.all_places = True + self.out_queue.put_nowait(msg) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.subscribe.all_resources = True + self.out_queue.put_nowait(msg) + await self.sync_with_coordinator() + if self.stopping.is_set(): + raise ServerError("Could not connect to coordinator") + + async def stop(self): + """Stops stream for resource and place updates started with ClientSession.start().""" + self.out_queue.put_nowait(None) # let the sender side exit gracefully + if self.stream_call: + self.stream_call.cancel() + try: + await self.pump_task + except asyncio.CancelledError: + pass + self.cancel_pending_syncs() + + async def close(self): + """Closes the channel to the coordinator.""" + await self.channel.close() + + async def sync_with_coordinator(self): + """Wait for coordinator to process all previous messages in stream.""" + identifier = 
next(self.sync_id) + event = self.sync_events[identifier] = asyncio.Event() + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.sync.id = identifier + logging.debug("sending sync %s", identifier) + self.out_queue.put_nowait(msg) + await event.wait() + if self.stopping.is_set(): + logging.debug("sync %s failed", identifier) + else: + logging.debug("received sync %s", identifier) + return not self.stopping.is_set() + + def cancel_pending_syncs(self): + """Cancel all pending ClientSession.sync_with_coordinator() calls.""" + assert self.stopping.is_set() # only call when something has gone wrong + while True: + try: + identifier, event = self.sync_events.popitem() + logging.debug("cancelling %s %s", identifier, event) + event.set() + except KeyError: + break + + async def message_pump(self): + """Task for receiving resource and place updates.""" + got_message = False + try: + self.stream_call = call = self.stub.ClientStream(queue_as_aiter(self.out_queue)) + async for out_msg in call: + out_msg: labgrid_coordinator_pb2.ClientOutMessage + got_message = True + logging.debug("out_msg from coordinator: %s", out_msg) + for update in out_msg.updates: + update_kind = update.WhichOneof("kind") + if update_kind == "resource": + resource: labgrid_coordinator_pb2.Resource = update.resource + await self.on_resource_changed( + resource.path.exporter_name, + resource.path.group_name, + resource.path.resource_name, + ResourceEntry.data_from_pb2(resource), + ) + elif update_kind == "del_resource": + resource_path: labgrid_coordinator_pb2.Resource.Path = update.del_resource + await self.on_resource_changed( + resource_path.exporter_name, resource_path.group_name, resource_path.resource_name, {} + ) + elif update_kind == "place": + place = update.place + await self.on_place_changed(place) + elif update_kind == "del_place": + place_name = update.del_place + await self.on_place_deleted(place_name) + else: + logging.warning("unknown update from coordinator! 
%s", update_kind) + if out_msg.HasField("sync"): + event = self.sync_events.pop(out_msg.sync.id) + event.set() + except grpc.aio.AioRpcError as e: + if e.code() == grpc.StatusCode.UNAVAILABLE: + if got_message: + logging.error("coordinator became unavailable: %s", e.details()) + else: + logging.error("coordinator is unavailable: %s", e.details()) + else: + logging.exception("unexpected grpc error in coordinator message pump task") + except Exception: + logging.exception("error in coordinator message pump task") + finally: + self.stopping.set() + self.out_queue.put_nowait(None) # let the sender side exit gracefully + self.cancel_pending_syncs() async def on_resource_changed(self, exporter, group_name, resource_name, resource): group = self.resources.setdefault(exporter, {}).setdefault(group_name, {}) @@ -129,44 +217,40 @@ async def on_resource_changed(self, exporter, group_name, resource_name, resourc old = group[resource_name].data group[resource_name].data = resource if self.monitor: - if resource and not old: + if "cls" in resource and not old: print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} created: {resource}") - elif resource and old: + elif "cls" in resource and old: print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} changed:") for k, v_old, v_new in diff_dict(flat_dict(old), flat_dict(resource)): print(f" {k}: {v_old} -> {v_new}") else: print(f"Resource {exporter}/{group_name}/???/{resource_name} deleted") - async def on_place_changed(self, name, config): - if not config: - del self.places[name] - if self.monitor: - print(f"Place {name} deleted") - return - config = config.copy() - config["name"] = name - config["matches"] = [ResourceMatch(**match) for match in config["matches"]] - config = filter_dict(config, Place, warn=True) + async def on_place_changed(self, place_pb2: labgrid_coordinator_pb2.Place): + name = place_pb2.name + if name not in self.places: - place = Place(**config) - self.places[name] = 
place + self.places[name] = Place.from_pb2(place_pb2) if self.monitor: - print(f"Place {name} created: {place}") + print(f"Place {name} created: {place_pb2}") else: place = self.places[name] old = flat_dict(place.asdict()) - place.update(config) + place.update_from_pb2(place_pb2) new = flat_dict(place.asdict()) if self.monitor: print(f"Place {name} changed:") for k, v_old, v_new in diff_dict(old, new): print(f" {k}: {v_old} -> {v_new}") + async def on_place_deleted(self, name: str): + del self.places[name] + if self.monitor: + print(f"Place {name} deleted") + async def do_monitor(self): self.monitor = True - while True: - await asyncio.sleep(3600.0) + await self.stopping.wait() async def complete(self): if self.args.type == "resources": @@ -411,61 +495,62 @@ async def add_place(self): name = self.args.place if not name: raise UserError("missing place name. Set with -p or via env var LG_PLACE") - if name in self.places: - raise UserError(f"{name} already exists") - res = await self.call("org.labgrid.coordinator.add_place", name) - if not res: - raise ServerError(f"failed to add place {name}") - return res + + request = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + try: + await self.stub.AddPlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_place(self): """Delete a place from the coordinator""" - pattern = self.args.place - if pattern not in self.places: - raise UserError("deletes require an exact place name") - place = self.places[pattern] - if place.acquired: - raise UserError(f"place {place.name} is not idle (acquired by {place.acquired})") - name = place.name - if not name: - raise UserError("missing place name. 
Set with -p or via env var $PLACE") - if name not in self.places: - raise UserError(f"{name} does not exist") - res = await self.call("org.labgrid.coordinator.del_place", name) - if not res: - raise ServerError(f"failed to delete place {name}") - return res + place = self.get_idle_place() + request = labgrid_coordinator_pb2.DeletePlaceRequest(name=place.name) + try: + await self.stub.DeletePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_alias(self): """Add an alias for a place on the coordinator""" place = self.get_idle_place() alias = self.args.alias - if alias in place.aliases: - raise UserError(f"place {place.name} already has alias {alias}") - res = await self.call("org.labgrid.coordinator.add_place_alias", place.name, alias) - if not res: - raise ServerError(f"failed to add alias {alias} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.AddPlaceAliasRequest(placename=place.name, alias=alias) + + try: + await self.stub.AddPlaceAlias(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_alias(self): """Delete an alias for a place from the coordinator""" place = self.get_idle_place() alias = self.args.alias - if alias not in place.aliases: - raise UserError(f"place {place.name} has no alias {alias}") - res = await self.call("org.labgrid.coordinator.del_place_alias", place.name, alias) - if not res: - raise ServerError(f"failed to delete alias {alias} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.DeletePlaceAliasRequest(placename=place.name, alias=alias) + + try: + await self.stub.DeletePlaceAlias(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def set_comment(self): """Set the comment on a place""" place = self.get_place() comment = " ".join(self.args.comment) - res = await 
self.call("org.labgrid.coordinator.set_place_comment", place.name, comment) - if not res: - raise ServerError(f"failed to set comment {comment} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.SetPlaceCommentRequest(placename=place.name, comment=comment) + + try: + await self.stub.SetPlaceComment(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def set_tags(self): """Set the tags on a place""" @@ -481,10 +566,14 @@ async def set_tags(self): if not TAG_VAL.match(v): raise UserError(f"tag value '{v}' needs to match the rexex '{TAG_VAL.pattern}'") tags[k] = v - res = await self.call("org.labgrid.coordinator.set_place_tags", place.name, tags) - if not res: - raise ServerError(f"failed to set tags {' '.join(self.args.tags)} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.SetPlaceTagsRequest(placename=place.name, tags=tags) + + try: + await self.stub.SetPlaceTags(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_match(self): """Add a match for a place, making fuzzy matching available to the @@ -498,9 +587,14 @@ async def add_match(self): if place.hasmatch(pattern.split("/")): print(f"pattern '{pattern}' exists, skipping", file=sys.stderr) continue - res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern) - if not res: - raise ServerError(f"failed to add match {pattern} for place {place.name}") + + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern) + + try: + await self.stub.AddPlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_match(self): """Delete a match for a place""" @@ -512,9 +606,14 @@ async def del_match(self): raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") if 
not place.hasmatch(pattern.split("/")): print(f"pattern '{pattern}' not found, skipping", file=sys.stderr) - res = await self.call("org.labgrid.coordinator.del_place_match", place.name, pattern) - if not res: - raise ServerError(f"failed to delete match {pattern} for place {place.name}") + + request = labgrid_coordinator_pb2.DeletePlaceMatchRequest(placename=place.name, pattern=pattern) + + try: + await self.stub.DeletePlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_named_match(self): """Add a named match for a place. @@ -527,15 +626,18 @@ async def add_named_match(self): name = self.args.name if not 2 <= pattern.count("/") <= 3: raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") - if place.hasmatch(pattern.split("/")): - raise UserError(f"pattern '{pattern}' exists") if "*" in pattern: raise UserError(f"invalid pattern '{pattern}' ('*' not allowed for named matches)") if not name: raise UserError(f"invalid name '{name}'") - res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern, name) - if not res: - raise ServerError(f"failed to add match {pattern} for place {place.name}") + + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern, rename=name) + + try: + await self.stub.AddPlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) def check_matches(self, place): resources = [] @@ -558,30 +660,31 @@ async def acquire(self): if not self.args.allow_unmatched: self.check_matches(place) - res = await self.call("org.labgrid.coordinator.acquire_place", place.name) + request = labgrid_coordinator_pb2.AcquirePlaceRequest(placename=place.name) - if res: + try: + await self.stub.AcquirePlace(request) + await self.sync_with_coordinator() print(f"acquired place {place.name}") - return - - # check potential failure 
causes - for exporter, groups in sorted(self.resources.items()): - for group_name, group in sorted(groups.items()): - for resource_name, resource in sorted(group.items()): - resource_path = (exporter, group_name, resource.cls, resource_name) - if resource.acquired is None: - continue - match = place.getmatch(resource_path) - if match is None: - continue - name = resource_name - if match.rename: - name = match.rename - print( - f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'" - ) # pylint: disable=line-too-long + except grpc.aio.AioRpcError as e: + # check potential failure causes + for exporter, groups in sorted(self.resources.items()): + for group_name, group in sorted(groups.items()): + for resource_name, resource in sorted(group.items()): + resource_path = (exporter, group_name, resource.cls, resource_name) + if not resource.acquired: + continue + match = place.getmatch(resource_path) + if match is None: + continue + name = resource_name + if match.rename: + name = match.rename + print( + f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'" + ) # pylint: disable=line-too-long - raise ServerError(f"failed to acquire place {place.name}") + raise ServerError(e.details()) async def release(self): """Release a previously acquired place""" @@ -595,38 +698,43 @@ async def release(self): f"place {place.name} is acquired by a different user ({place.acquired}), use --kick if you are sure" ) # pylint: disable=line-too-long print(f"warning: kicking user ({place.acquired})") - res = await self.call("org.labgrid.coordinator.release_place", place.name) - if not res: - raise ServerError(f"failed to release place {place.name}") + + request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name) + + try: + await self.stub.ReleasePlace(request) + await self.sync_with_coordinator() + except 
grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"released place {place.name}") async def release_from(self): """Release a place, but only if acquired by a specific user""" place = self.get_place() - res = await self.call( - "org.labgrid.coordinator.release_place_from", - place.name, - self.args.acquired, - ) - if not res: - raise ServerError(f"failed to release place {place.name}") + + request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name, fromuser=self.args.acquired) + + try: + await self.stub.ReleasePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"{self.args.acquired} has released place {place.name}") async def allow(self): """Allow another use access to a previously acquired place""" place = self.get_place() - if not place.acquired: - raise UserError(f"place {place.name} is not acquired") - _, user = place.acquired.split("/") - if user != self.getuser(): - raise UserError(f"place {place.name} is acquired by a different user ({place.acquired})") if "/" not in self.args.user: raise UserError(f"user {self.args.user} must be in / format") - res = await self.call("org.labgrid.coordinator.allow_place", place.name, self.args.user) - if not res: - raise ServerError(f"failed to allow {self.args.user} for place {place.name}") + request = labgrid_coordinator_pb2.AllowPlaceRequest(placename=place.name, user=self.args.user) + + try: + await self.stub.AllowPlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"allowed {self.args.user} for place {place.name}") @@ -1292,14 +1400,32 @@ def write_image(self): raise UserError(e) async def create_reservation(self): - filters = " ".join(self.args.filters) prio = self.args.prio - res = await self.call("org.labgrid.coordinator.create_reservation", filters, prio=prio) - if res is None: - raise ServerError("failed to create reservation") - 
((token, config),) = res.items() # we get a one-item dict - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + + fltr = {} + for pair in self.args.filters: + try: + k, v = pair.split("=") + except ValueError: + raise UserError(f"'{pair}' is not a valid filter (must contain a '=')") + if not TAG_KEY.match(k): + raise UserError(f"Key '{k}' in filter '{pair}' is invalid") + if not TAG_KEY.match(v): + raise UserError(f"Value '{v}' in filter '{pair}' is invalid") + fltr[k] = v + + fltrs = { + "main": labgrid_coordinator_pb2.Reservation.Filter(filter=fltr), + } + + request = labgrid_coordinator_pb2.CreateReservationRequest(filters=fltrs, prio=prio) + + try: + response: labgrid_coordinator_pb2.CreateReservationResponse = await self.stub.CreateReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + res = Reservation.from_pb2(response.reservation) if self.args.shell: print(f"export LG_TOKEN={res.token}") else: @@ -1311,18 +1437,25 @@ async def create_reservation(self): await self._wait_reservation(res.token, verbose=False) async def cancel_reservation(self): - token = self.args.token - res = await self.call("org.labgrid.coordinator.cancel_reservation", token) - if not res: - raise ServerError(f"failed to cancel reservation {token}") + token: str = self.args.token + + request = labgrid_coordinator_pb2.CancelReservationRequest(token=token) + + try: + await self.stub.CancelReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) - async def _wait_reservation(self, token, verbose=True): + async def _wait_reservation(self, token: str, verbose=True): while True: - config = await self.call("org.labgrid.coordinator.poll_reservation", token) - if config is None: - raise ServerError("reservation not found") - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + request = 
labgrid_coordinator_pb2.PollReservationRequest(token=token) + + try: + response: labgrid_coordinator_pb2.PollReservationResponse = await self.stub.PollReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + res = Reservation.from_pb2(response.reservation) if verbose: res.show() if res.state is ReservationState.waiting: @@ -1335,10 +1468,15 @@ async def wait_reservation(self): await self._wait_reservation(token) async def print_reservations(self): - reservations = await self.call("org.labgrid.coordinator.get_reservations") - for token, config in sorted(reservations.items(), key=lambda x: (-x[1]["prio"], x[1]["created"])): # pylint: disable=line-too-long - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + request = labgrid_coordinator_pb2.GetReservationsRequest() + + try: + response: labgrid_coordinator_pb2.GetReservationsResponse = await self.stub.GetReservations(request) + reservations = [Reservation.from_pb2(x) for x in response.reservations] + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + for res in sorted(reservations, key=lambda x: (-x.prio, x.created)): print(f"Reservation '{res.token}':") res.show(level=1) @@ -1378,46 +1516,16 @@ def print_version(self): print(labgrid_version()) -def start_session(url, realm, extra): - from autobahn.asyncio.wamp import ApplicationRunner - +def start_session(address, extra, debug=False): loop = asyncio.get_event_loop() - ready = asyncio.Event() - - async def connected(session): # pylint: disable=unused-argument - ready.set() - - if not extra: - extra = {} - extra["loop"] = loop - extra["connected"] = connected - - session = [None] - - def make(*args, **kwargs): - nonlocal session - session[0] = ClientSession(*args, **kwargs) - return session[0] + if debug: + loop.set_debug(True) - url = proxymanager.get_url(url, default_port=20408) + address = proxymanager.get_grpc_address(address, default_port=20408) - runner = 
ApplicationRunner(url, realm=realm, extra=extra) - coro = runner.run(make, start_loop=False) - - _, protocol = loop.run_until_complete(coro) - - # there is no other notification when the WAMP connection setup times out, - # so we need to wait for one of these protocol futures to resolve - done, pending = loop.run_until_complete( - asyncio.wait({protocol.is_open, protocol.is_closed}, timeout=30, return_when=asyncio.FIRST_COMPLETED) - ) - if protocol.is_closed in done: - raise Error("connection closed during setup") - if protocol.is_open in pending: - raise Error("connection timed out during setup") - - loop.run_until_complete(ready.wait()) - return session[0] + session = ClientSession(address, loop, **extra) + loop.run_until_complete(session.start()) + return session def find_role_by_place(config, place): @@ -1504,10 +1612,10 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument( "-x", - "--crossbar", - metavar="URL", + "--coordinator", + metavar="ADDRESS", type=str, - help="crossbar websocket URL (default: value from env variable LG_CROSSBAR, otherwise ws://127.0.0.1:20408/ws)", + help="coordinator HOST[:PORT] (default: value from env variable LG_COORDINATOR, otherwise 127.0.0.1:20408)", ) parser.add_argument("-c", "--config", type=str, default=os.environ.get("LG_ENV"), help="config file") parser.add_argument("-p", "--place", type=str, default=place, help="place name/alias") @@ -1913,20 +2021,15 @@ def main(): signal.signal(signal.SIGTERM, lambda *_: sys.exit(0)) try: - crossbar_url = args.crossbar or env.config.get_option("crossbar_url") - except (AttributeError, KeyError): - # in case of no env or not set, use LG_CROSSBAR env variable or default - crossbar_url = os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws") - - try: - crossbar_realm = env.config.get_option("crossbar_realm") + coordinator_address = args.coordinator or env.config.get_option("coordinator_address") except (AttributeError, KeyError): - # in case of no env, use 
LG_CROSSBAR_REALM env variable or default - crossbar_realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + # in case of no env or not set, use LG_COORDINATOR env variable or default + coordinator_address = os.environ.get("LG_COORDINATOR", "127.0.0.1:20408") - logging.debug('Starting session with "%s", realm: "%s"', crossbar_url, crossbar_realm) + logging.debug('Starting session with "%s"', coordinator_address) + session = start_session(coordinator_address, extra, args.debug) + logging.debug("Started session") - session = start_session(crossbar_url, crossbar_realm, extra) try: if asyncio.iscoroutinefunction(args.func): if getattr(args.func, "needs_target", False): @@ -1939,6 +2042,10 @@ def main(): else: args.func(session) finally: + logging.debug("Stopping session") + session.loop.run_until_complete(session.stop()) + session.loop.run_until_complete(session.close()) + logging.debug("Stopping loop") session.loop.close() except (NoResourceFoundError, NoDriverFoundError, InvalidConfigError) as e: if args.debug: @@ -1968,8 +2075,8 @@ def main(): ) # pylint: disable=line-too-long exitcode = 1 - except ConnectionError as e: - print(f"Could not connect to coordinator: {e}", file=sys.stderr) + except ServerError as e: + print(f"Server error: {e}", file=sys.stderr) exitcode = 1 except InteractiveCommandError as e: if args.debug: diff --git a/labgrid/remote/common.py b/labgrid/remote/common.py index 2ea1d2f1a..93b6b22e7 100644 --- a/labgrid/remote/common.py +++ b/labgrid/remote/common.py @@ -1,14 +1,17 @@ -import socket +import asyncio import time import enum import random import re import string +import logging from datetime import datetime from fnmatch import fnmatchcase import attr +from .generated import labgrid_coordinator_pb2 + __all__ = [ "TAG_KEY", "TAG_VAL", @@ -17,19 +20,50 @@ "Place", "ReservationState", "Reservation", - "enable_tcp_nodelay", - "monkey_patch_max_msg_payload_size_ws_option", ] TAG_KEY = re.compile(r"[a-z][a-z0-9_]+") TAG_VAL = 
re.compile(r"[a-z0-9_]?") +def set_map_from_dict(m, d): + for k, v in d.items(): + assert isinstance(k, str) + if v is None: + m[k].Clear() + elif isinstance(v, bool): + m[k].bool_value = v + elif isinstance(v, int): + if v < 0: + m[k].int_value = v + else: + m[k].uint_value = v + elif isinstance(v, float): + m[k].float_value = v + elif isinstance(v, str): + m[k].string_value = v + else: + raise ValueError(f"cannot translate {repr(v)} to MapValue") + + +def build_dict_from_map(m): + d = {} + for k, v in m.items(): + v: labgrid_coordinator_pb2.MapValue + kind = v.WhichOneof("kind") + if kind is None: + d[k] = None + else: + d[k] = getattr(v, kind) + return d + + @attr.s(eq=False) class ResourceEntry: data = attr.ib() # cls, params def __attrs_post_init__(self): + assert isinstance(self.data, dict) self.data.setdefault("acquired", None) self.data.setdefault("avail", False) @@ -84,6 +118,35 @@ def release(self): # ignore repeated releases self.data["acquired"] = None + def as_pb2(self): + msg = labgrid_coordinator_pb2.Resource() + msg.cls = self.cls + params = self.params.copy() + extra = params.pop("extra", {}) + set_map_from_dict(msg.params, params) + set_map_from_dict(msg.extra, extra) + if self.acquired is not None: + msg.acquired = self.acquired + msg.avail = self.avail + return msg + + @staticmethod + def data_from_pb2(pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.Resource) + data = { + "cls": pb2.cls, + "params": build_dict_from_map(pb2.params), + "acquired": pb2.acquired or None, + "avail": pb2.avail, + } + data["params"]["extra"] = build_dict_from_map(pb2.extra) + return data + + @classmethod + def from_pb2(cls, pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.Place) + return cls(cls.data_from_pb2(pb2)) + @attr.s(eq=True, repr=False, str=False) # This class requires eq=True, since we put the matches into a list and require @@ -133,6 +196,26 @@ def ismatch(self, resource_path): return True + def as_pb2(self): + return 
labgrid_coordinator_pb2.ResourceMatch( + exporter=self.exporter, + group=self.group, + cls=self.cls, + name=self.name, + rename=self.rename, + ) + + @classmethod + def from_pb2(cls, pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.ResourceMatch) + return cls( + exporter=pb2.exporter, + group=pb2.group, + cls=pb2.cls, + name=pb2.name if pb2.HasField("name") else None, + rename=pb2.rename, + ) + @attr.s(eq=False) class Place: @@ -170,14 +253,19 @@ def asdict(self): "reservation": self.reservation, } - def update(self, config): + def update_from_pb2(self, place_pb2): + # FIXME untangle this... + place = Place.from_pb2(place_pb2) fields = attr.fields_dict(type(self)) - for k, v in config.items(): + for k, v in place.asdict().items(): assert k in fields if k == "name": # we cannot rename places assert v == self.name continue + if k == "matches": + self.matches = [ResourceMatch.from_pb2(m) for m in place_pb2.matches] + continue setattr(self, k, v) def show(self, level=0): @@ -241,6 +329,56 @@ def unmatched(self, resource_paths): def touch(self): self.changed = time.time() + def as_pb2(self): + try: + acquired_resources = [] + for resource in self.acquired_resources: + assert not isinstance(resource, (tuple, list)), "as_pb2() only implemented for coordinator" + assert len(resource.path) == 4 + path = "/".join(resource.path) + acquired_resources.append(path) + + place = labgrid_coordinator_pb2.Place() + place.name = self.name + place.aliases.extend(self.aliases) + place.comment = self.comment + place.matches.extend(m.as_pb2() for m in self.matches) + place.acquired = self.acquired or "" + place.acquired_resources.extend(acquired_resources) + place.allowed.extend(self.allowed) + place.changed = self.changed + place.created = self.created + if self.reservation: + place.reservation = self.reservation + for key, value in self.tags.items(): + place.tags[key] = value + return place + except TypeError: + logging.exception("failed to convert place %s to protobuf", self) + 
raise + + @classmethod + def from_pb2(cls, pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.Place) + acquired_resources = [] + for path in pb2.acquired_resources: + path = path.split("/") + assert len(path) == 4 + acquired_resources.append(path) + return cls( + name=pb2.name, + aliases=pb2.aliases, + comment=pb2.comment, + tags=dict(pb2.tags), + matches=[ResourceMatch.from_pb2(m) for m in pb2.matches], + acquired=pb2.acquired if pb2.HasField("acquired") and pb2.acquired else None, + acquired_resources=acquired_resources, + allowed=pb2.allowed, + created=pb2.created, + changed=pb2.changed, + reservation=pb2.reservation if pb2.HasField("reservation") else None, + ) + class ReservationState(enum.Enum): waiting = 0 @@ -304,44 +442,58 @@ def show(self, level=0): print(indent + f"created: {datetime.fromtimestamp(self.created)}") print(indent + f"timeout: {datetime.fromtimestamp(self.timeout)}") + def as_pb2(self): + res = labgrid_coordinator_pb2.Reservation() + res.owner = self.owner + res.token = self.token + res.state = self.state.value + res.prio = self.prio + for name, fltr in self.filters.items(): + res.filters[name].CopyFrom(labgrid_coordinator_pb2.Reservation.Filter(filter=fltr)) + if self.allocations: + # TODO: refactor to have only one place per filter group + assert len(self.allocations) == 1 + assert "main" in self.allocations + allocation = self.allocations["main"] + assert len(allocation) == 1 + res.allocations.update({"main": allocation[0]}) + res.created = self.created + res.timeout = self.timeout + return res -def enable_tcp_nodelay(session): - """ - asyncio/autobahn does not set TCP_NODELAY by default, so we need to do it - like this for now. - """ - s = session._transport.transport.get_extra_info("socket") - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True) - - -def monkey_patch_max_msg_payload_size_ws_option(): - """ - The default maxMessagePayloadSize in autobahn is 1M. 
For larger setups with a big number of - exported resources, this becomes the limiting factor. - Increase maxMessagePayloadSize in WampWebSocketClientFactory.setProtocolOptions() by monkey - patching it, so autobahn.asyncio.wamp.ApplicationRunner effectively sets the increased value. - - This function must be called before ApplicationRunner is instanciated. - """ - from autobahn.asyncio.websocket import WampWebSocketClientFactory - - original_method = WampWebSocketClientFactory.setProtocolOptions - - def set_protocol_options(*args, **kwargs): - new_max_message_payload_size = 10485760 - - # maxMessagePayloadSize given as positional arg - args = list(args) - try: - args[9] = max((args[9], new_max_message_payload_size)) - except IndexError: - pass - - # maxMessagePayloadSize given as kwarg - kwarg_name = "maxMessagePayloadSize" - if kwarg_name in kwargs and kwargs[kwarg_name] is not None: - kwargs[kwarg_name] = max((kwargs[kwarg_name], new_max_message_payload_size)) - - return original_method(*args, **kwargs) - - WampWebSocketClientFactory.setProtocolOptions = set_protocol_options + @classmethod + def from_pb2(cls, pb2: labgrid_coordinator_pb2.Reservation): + filters = {} + for name, fltr_pb2 in pb2.filters.items(): + filters[name] = dict(fltr_pb2.filter) + allocations = {} + for fltr_name, place_name in pb2.allocations.items(): + allocations[fltr_name] = [place_name] + return cls( + owner=pb2.owner, + token=pb2.token, + state=ReservationState(pb2.state), + prio=pb2.prio, + filters=filters, + allocations=allocations, + created=pb2.created, + timeout=pb2.timeout, + ) + + +async def queue_as_aiter(q): + try: + while True: + try: + item = await q.get() + except asyncio.CancelledError: + # gRPC doesn't like to receive exceptions from the request_iterator + return + if item is None: + return + yield item + q.task_done() + logging.debug("sent message %s", item) + except Exception: + logging.exception("error in queue_as_aiter") + raise diff --git 
a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index e3ba8210f..b4d4cf27a 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -1,26 +1,29 @@ -"""The coordinator module coordinates exported resources and clients accessing them.""" - -# pylint: disable=no-member,unused-argument +#!/usr/bin/env python3 +import argparse +import logging import asyncio -import sys import traceback -from collections import defaultdict -from os import environ -from pprint import pprint from enum import Enum from functools import wraps import attr -from autobahn import wamp -from autobahn.asyncio.wamp import ApplicationRunner, ApplicationSession -from autobahn.wamp.types import RegisterOptions - -from .common import * # pylint: disable=wildcard-import +import grpc +from grpc_reflection.v1alpha import reflection + +from .common import ( + ResourceEntry, + ResourceMatch, + Place, + Reservation, + ReservationState, + queue_as_aiter, + TAG_KEY, + TAG_VAL, +) from .scheduler import TagSet, schedule -from ..util import atomic_replace, yaml - - -monkey_patch_max_msg_payload_size_ws_option() +from .generated import labgrid_coordinator_pb2 +from .generated import labgrid_coordinator_pb2_grpc +from ..util import atomic_replace, labgrid_version, yaml class Action(Enum): @@ -34,19 +37,10 @@ class RemoteSession: """class encapsulating a session, used by ExporterSession and ClientSession""" coordinator = attr.ib() - session = attr.ib() - authid = attr.ib() - version = attr.ib(default="unknown", init=False) - - @property - def key(self): - """Key of the session""" - return self.session - - @property - def name(self): - """Name of the session""" - return self.authid.split("/", 1)[1] + peer = attr.ib() + name = attr.ib() + queue = attr.ib() + version = attr.ib() @attr.s(eq=False) @@ -56,26 +50,41 @@ class ExporterSession(RemoteSession): groups = attr.ib(default=attr.Factory(dict), init=False) - def set_resource(self, groupname, resourcename, resourcedata): + 
def set_resource(self, groupname, resourcename, resource): + """This is called when Exporters update resources or when they disconnect.""" + logging.info("set_resource %s %s %s", groupname, resourcename, resource) group = self.groups.setdefault(groupname, {}) old = group.get(resourcename) - if resourcedata and old: - old.update(resourcedata) - new = old - elif resourcedata and not old: - new = group[resourcename] = ResourceImport( - resourcedata, path=(self.name, groupname, resourcedata["cls"], resourcename) + if resource is not None: + new = ResourceImport( + data=ResourceImport.data_from_pb2(resource), path=(self.name, groupname, resource.cls, resourcename) ) - elif not resourcedata and old: - new = None - del group[resourcename] + if old: + old.data.update(new.data) + new = old + else: + group[resourcename] = new else: - assert not resourcedata and not old new = None + try: + del group[resourcename] + except KeyError: + pass - self.coordinator.publish( - "org.labgrid.coordinator.resource_changed", self.name, groupname, resourcename, new.asdict() if new else {} - ) + msg = labgrid_coordinator_pb2.ClientOutMessage() + update = msg.updates.add() + if new: + update.resource.CopyFrom(new.as_pb2()) + update.resource.path.exporter_name = self.name + update.resource.path.group_name = groupname + update.resource.path.resource_name = resourcename + else: + update.del_resource.exporter_name = self.name + update.del_resource.group_name = groupname + update.del_resource.resource_name = resourcename + + for client in self.coordinator.clients.values(): + client.queue.put_nowait(msg) if old and new: assert old is new @@ -99,7 +108,38 @@ def get_resources(self): @attr.s(eq=False) class ClientSession(RemoteSession): - pass + def subscribe_places(self): + # send initial places + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + for place in self.coordinator.places.values(): + place: Place + out_msg.updates.add().place.CopyFrom(place.as_pb2()) + self.queue.put_nowait(out_msg) 
+ + def subscribe_resources(self): + # collect initial resources + collected = [] + logging.debug("sending resources to %s", self) + for exporter in self.coordinator.exporters.values(): + logging.debug("sending resources %s", exporter) + exporter: ExporterSession + for groupname, group in exporter.groups.items(): + logging.debug("sending resources %s", groupname) + for resourcename, resource in group.items(): + logging.debug("sending resources %s", resourcename) + resource: ResourceImport + update = labgrid_coordinator_pb2.UpdateResponse() + update.resource.CopyFrom(resource.as_pb2()) + update.resource.path.exporter_name = exporter.name + update.resource.path.group_name = groupname + update.resource.path.resource_name = resourcename + collected.append(update) + # send batches + while collected: + batch, collected = collected[:100], collected[100:] + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + out_msg.updates.extend(batch) + self.queue.put_nowait(out_msg) @attr.s(eq=False) @@ -121,135 +161,42 @@ async def wrapper(self, *args, **kwargs): return wrapper -class CoordinatorComponent(ApplicationSession): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.lock = asyncio.Lock() +class ExporterCommand: + def __init__(self, request) -> None: + self.request = request + self.response = None + self.completed = asyncio.Event() - @locked - async def onConnect(self): - self.sessions = {} + def complete(self, response) -> None: + self.response = response + self.completed.set() + + async def wait(self): + await asyncio.wait_for(self.completed.wait(), 10) + + +class ExporterError(Exception): + pass + + +class Coordinator(labgrid_coordinator_pb2_grpc.CoordinatorServicer): + def __init__(self) -> None: self.places = {} self.reservations = {} self.poll_task = None self.save_scheduled = False + self.lock = asyncio.Lock() + self.exporters: dict[str, ExporterSession] = {} + self.clients: dict[str, ClientSession] = {} self.load() - self.save_later() 
- - enable_tcp_nodelay(self) - self.join( - self.config.realm, - authmethods=["anonymous"], - authid="coordinator", - authextra={"authid": "coordinator"}, - ) - - @locked - async def onJoin(self, details): - await self.subscribe(self.on_session_join, "wamp.session.on_join") - await self.subscribe(self.on_session_leave, "wamp.session.on_leave") - await self.register( - self.attach, "org.labgrid.coordinator.attach", options=RegisterOptions(details_arg="details") - ) - - # resources - await self.register( - self.set_resource, "org.labgrid.coordinator.set_resource", options=RegisterOptions(details_arg="details") - ) - await self.register(self.get_resources, "org.labgrid.coordinator.get_resources") - - # places - await self.register(self.add_place, "org.labgrid.coordinator.add_place") - await self.register(self.del_place, "org.labgrid.coordinator.del_place") - await self.register(self.add_place_alias, "org.labgrid.coordinator.add_place_alias") - await self.register(self.del_place_alias, "org.labgrid.coordinator.del_place_alias") - await self.register(self.set_place_tags, "org.labgrid.coordinator.set_place_tags") - await self.register(self.set_place_comment, "org.labgrid.coordinator.set_place_comment") - await self.register(self.add_place_match, "org.labgrid.coordinator.add_place_match") - await self.register(self.del_place_match, "org.labgrid.coordinator.del_place_match") - await self.register( - self.acquire_place, "org.labgrid.coordinator.acquire_place", options=RegisterOptions(details_arg="details") - ) - await self.register( - self.release_place, "org.labgrid.coordinator.release_place", options=RegisterOptions(details_arg="details") - ) - await self.register( - self.release_place_from, - "org.labgrid.coordinator.release_place_from", - options=RegisterOptions(details_arg="details"), - ) - await self.register( - self.allow_place, "org.labgrid.coordinator.allow_place", options=RegisterOptions(details_arg="details") - ) - await self.register(self.get_places, 
"org.labgrid.coordinator.get_places") - - # reservations - await self.register( - self.create_reservation, - "org.labgrid.coordinator.create_reservation", - options=RegisterOptions(details_arg="details"), - ) - await self.register( - self.cancel_reservation, - "org.labgrid.coordinator.cancel_reservation", - ) - await self.register( - self.poll_reservation, - "org.labgrid.coordinator.poll_reservation", - ) - await self.register( - self.get_reservations, - "org.labgrid.coordinator.get_reservations", - ) self.poll_task = asyncio.get_event_loop().create_task(self.poll()) - print("Coordinator ready.") - - @locked - async def onLeave(self, details): - await self.save() - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - super().onLeave(details) - - @locked - async def onDisconnect(self): - await self.save() - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - await asyncio.sleep(0.5) # give others a chance to clean up - async def _poll_step(self): # save changes if self.save_scheduled: await self.save() - # poll exporters - for session in list(self.sessions.values()): - if isinstance(session, ExporterSession): - fut = self.call(f"org.labgrid.exporter.{session.name}.version") - done, _ = await asyncio.wait([fut], timeout=5) - if not done: - print(f"kicking exporter ({session.key}/{session.name})") - await self.call("wamp.session.kill", session.key, message="timeout detected by coordinator") - print(f"cleaning up exporter ({session.key}/{session.name})") - await self.on_session_leave(session.key) - print(f"removed exporter ({session.key}/{session.name})") - continue - try: - session.version = done.pop().result() - except wamp.exception.ApplicationError as e: - if e.error == "wamp.error.no_such_procedure": - pass # old client - elif e.error == "wamp.error.canceled": - pass # disconnected - elif e.error == "wamp.error.no_such_session": - pass # client has already disconnected - else: - raise # update 
reservations self.schedule_reservations() @@ -265,9 +212,17 @@ async def poll(self): traceback.print_exc() def save_later(self): + logging.debug("Setting Save-later") self.save_scheduled = True + def _get_resources(self): + result = {} + for session in self.exporters.values(): + result[session.name] = session.get_resources() + return result + async def save(self): + logging.debug("Running Save") self.save_scheduled = False resources = self._get_resources() @@ -278,7 +233,9 @@ async def save(self): places = places.encode() loop = asyncio.get_event_loop() + logging.debug("Awaiting resources") await loop.run_in_executor(None, atomic_replace, "resources.yaml", resources) + logging.debug("Awaiting places") await loop.run_in_executor(None, atomic_replace, "places.yaml", places) def load(self): @@ -302,6 +259,59 @@ def load(self): self.places[placename] = place except FileNotFoundError: pass + logging.info("loaded %s place(s)", len(self.places)) + + async def ClientStream(self, request_iterator, context): + peer = context.peer() + logging.info("client connected: %s", peer) + assert peer not in self.clients + out_msg_queue = asyncio.Queue() + + async def request_task(): + name = None + version = None + try: + async for in_msg in request_iterator: + in_msg: labgrid_coordinator_pb2.ClientInMessage + logging.debug("client in_msg %s", in_msg) + kind = in_msg.WhichOneof("kind") + if kind == "sync": + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + out_msg.sync.id = in_msg.sync.id + out_msg_queue.put_nowait(out_msg) + elif kind == "startup": + version = in_msg.startup.version + name = in_msg.startup.name + session = self.clients[peer] = ClientSession(self, peer, name, out_msg_queue, version) + logging.debug("Received startup from %s with %s", name, version) + elif kind == "subscribe": + if in_msg.subscribe.all_places: + session.subscribe_places() + if in_msg.subscribe.all_resources: + session.subscribe_resources() + else: + logging.warning("received unknown kind %s from 
client %s (version %s)", kind, name, version) + logging.debug("client request_task done: %s", context.done()) + except Exception: + logging.exception("error in client message handler") + + runnning_request_task = asyncio.get_event_loop().create_task(request_task()) + + try: + async for out_msg in queue_as_aiter(out_msg_queue): + out_msg: labgrid_coordinator_pb2.ClientOutMessage + logging.debug("client output %s", out_msg) + yield out_msg + finally: + try: + session = self.clients.pop(peer) + except KeyError: + logging.info("Never received startup from peer %s that disconnected", peer) + return + + runnning_request_task.cancel() + await runnning_request_task + logging.debug("client aborted %s, cancelled: %s", session, context.cancelled()) def _add_default_place(self, name): if name in self.places: @@ -313,6 +323,11 @@ def _add_default_place(self, name): place.matches.append(ResourceMatch(exporter="*", group=name, cls="*")) self.places[name] = place + def get_exporter_by_name(self, name): + for exporter in self.exporters.values(): + if exporter.name == name: + return exporter + async def _update_acquired_places(self, action, resource, callback=True): """Update acquired places when resources are added or removed.""" if action not in [Action.ADD, Action.DEL]: @@ -340,151 +355,178 @@ async def _update_acquired_places(self, action, resource, callback=True): self._publish_place(place) def _publish_place(self, place): - self.publish("org.labgrid.coordinator.place_changed", place.name, place.asdict()) + msg = labgrid_coordinator_pb2.ClientOutMessage() + msg.updates.add().place.CopyFrom(place.as_pb2()) + + for client in self.clients.values(): + client.queue.put_nowait(msg) + + def _publish_resource(self, resource: ResourceImport): + msg = labgrid_coordinator_pb2.ClientOutMessage() + update = msg.updates.add() + update.resource.CopyFrom(resource.as_pb2()) + update.resource.path.exporter_name = resource.path[0] + update.resource.path.group_name = resource.path[1] + 
update.resource.path.resource_name = resource.path[3] + + for client in self.clients.values(): + client.queue.put_nowait(msg) + + async def ExporterStream(self, request_iterator, context): + peer = context.peer() + logging.info("exporter connected: %s", peer) + assert peer not in self.exporters + command_queue = asyncio.Queue() + pending_commands = [] + + out_msg = labgrid_coordinator_pb2.ExporterOutMessage() + out_msg.hello.version = labgrid_version() + yield out_msg + + async def request_task(): + name = None + version = None + try: + async for in_msg in request_iterator: + in_msg: labgrid_coordinator_pb2.ExporterInMessage + logging.debug("exporter in_msg %s", in_msg) + kind = in_msg.WhichOneof("kind") + if kind in "response": + cmd = pending_commands.pop(0) + cmd.complete(in_msg.response) + logging.debug("Command %s is done", cmd) + elif kind == "startup": + version = in_msg.startup.version + name = in_msg.startup.name + session = self.exporters[peer] = ExporterSession(self, peer, name, command_queue, version) + logging.debug("Exporters: %s", self.exporters) + logging.debug("Received startup from %s with %s", name, version) + elif kind == "resource": + logging.debug("Received resource from %s with %s", name, in_msg.resource) + action, resource = session.set_resource( + in_msg.resource.path.group_name, in_msg.resource.path.resource_name, in_msg.resource + ) + if action is Action.ADD: + async with self.lock: + self._add_default_place(in_msg.resource.path.group_name) + if action in (Action.ADD, Action.DEL): + async with self.lock: + await self._update_acquired_places(action, resource) + self.save_later() + else: + logging.warning("received unknown kind %s from exporter %s (version %s)", kind, name, version) - def _publish_resource(self, resource): - self.publish( - "org.labgrid.coordinator.resource_changed", - resource.path[0], # exporter name - resource.path[1], # group name - resource.path[3], # resource name - resource.asdict(), - ) + logging.debug("exporter 
request_task done: %s", context.done()) + except Exception: + logging.exception("error in exporter message handler") - @locked - async def on_session_join(self, session_details): - print("join") - pprint(session_details) - session = session_details["session"] - authid = session_details["authextra"].get("authid") or session_details["authid"] - if authid.startswith("client/"): - session = ClientSession(self, session, authid) - elif authid.startswith("exporter/"): - session = ExporterSession(self, session, authid) - else: - return - self.sessions[session.key] = session + runnning_request_task = asyncio.get_event_loop().create_task(request_task()) - @locked - async def on_session_leave(self, session_id): - print(f"leave ({session_id})") try: - session = self.sessions.pop(session_id) - except KeyError: - return - if isinstance(session, ExporterSession): + async for cmd in queue_as_aiter(command_queue): + logging.debug("exporter cmd %s", cmd) + out_msg = labgrid_coordinator_pb2.ExporterOutMessage() + out_msg.set_acquired_request.CopyFrom(cmd.request) + pending_commands.append(cmd) + yield out_msg + except asyncio.exceptions.CancelledError: + logging.info("exporter disconnected %s", context.peer()) + except Exception: + logging.exception("error in exporter command handler") + finally: + runnning_request_task.cancel() + await runnning_request_task + + try: + session = self.exporters.pop(peer) + except KeyError: + logging.info("Never received startup from peer %s that disconnected", peer) + return + for groupname, group in session.groups.items(): for resourcename in group.copy(): - action, resource = session.set_resource(groupname, resourcename, {}) + action, resource = session.set_resource(groupname, resourcename, None) await self._update_acquired_places(action, resource, callback=False) - self.save_later() - - @locked - async def attach(self, name, details=None): - # TODO check if name is in use - session = self.sessions[details.caller] - session_details = 
self.sessions[session] - session_details["name"] = name - self.exporters[name] = defaultdict(dict) - - # not @locked because set_resource my be triggered by a acquire() call to - # an exporter, leading to a deadlock on acquire_place() - async def set_resource(self, groupname, resourcename, resourcedata, details=None): - """Called by exporter to create/update/remove resources.""" - session = self.sessions.get(details.caller) - if session is None: - return - assert isinstance(session, ExporterSession) - - groupname = str(groupname) - resourcename = str(resourcename) - # TODO check if acquired - print(details) - pprint(resourcedata) - action, resource = session.set_resource(groupname, resourcename, resourcedata) - if action is Action.ADD: - async with self.lock: - self._add_default_place(groupname) - if action in (Action.ADD, Action.DEL): - async with self.lock: - await self._update_acquired_places(action, resource) - self.save_later() - def _get_resources(self): - result = {} - for session in self.sessions.values(): - if isinstance(session, ExporterSession): - result[session.name] = session.get_resources() - return result - - @locked - async def get_resources(self, details=None): - return self._get_resources() + logging.debug("exporter aborted %s, cancelled: %s", context.peer(), context.cancelled()) @locked - async def add_place(self, name, details=None): + async def AddPlace(self, request, context): + name = request.name if not name or not isinstance(name, str): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, "name was not a string") if name in self.places: - return False + await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Place {name} already exists") + logging.debug("Adding %s", name) place = Place(name) self.places[name] = place self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceResponse() @locked - async def del_place(self, name, details=None): + async def DeletePlace(self, request, 
context): + name = request.name if not name or not isinstance(name, str): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, "name was not a string") if name not in self.places: - return False + await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Place {name} does not exist") + logging.debug("Deleting %s", name) del self.places[name] - self.publish("org.labgrid.coordinator.place_changed", name, {}) + msg = labgrid_coordinator_pb2.ClientOutMessage() + msg.updates.add().del_place = name + for client in self.clients.values(): + client.queue.put_nowait(msg) self.save_later() - return True + return labgrid_coordinator_pb2.DeletePlaceResponse() @locked - async def add_place_alias(self, placename, alias, details=None): + async def AddPlaceAlias(self, request, context): + placename = request.placename + alias = request.alias try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") place.aliases.add(alias) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceAliasResponse() @locked - async def del_place_alias(self, placename, alias, details=None): + async def DeletePlaceAlias(self, request, context): + placename = request.placename + alias = request.alias try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") try: place.aliases.remove(alias) except ValueError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Failed to remove {alias} from {placename}") place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.DeletePlaceAliasResponse() @locked - async def set_place_tags(self, placename, tags, details=None): + async def SetPlaceTags(self, request, context): + placename = request.placename + tags = dict(request.tags) 
try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") assert isinstance(tags, dict) for k, v in tags.items(): assert isinstance(k, str) assert isinstance(v, str) if not TAG_KEY.match(k): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Key {k} in {tags} is invalid") if not TAG_VAL.match(v): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Value {v} in {tags} is invalid") for k, v in tags.items(): if not v: try: @@ -496,52 +538,62 @@ async def set_place_tags(self, placename, tags, details=None): place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.SetPlaceTagsResponse() @locked - async def set_place_comment(self, placename, comment, details=None): + async def SetPlaceComment(self, request, context): + placename = request.placename + comment = request.comment try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") place.comment = comment place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.SetPlaceCommentResponse() @locked - async def add_place_match(self, placename, pattern, rename=None, details=None): + async def AddPlaceMatch(self, request, context): + placename = request.placename + pattern = request.pattern + rename = request.rename if request.HasField("rename") else None try: place = self.places[placename] except KeyError: - return False - match = ResourceMatch(*pattern.split("/"), rename=rename) - if match in place.matches: - return False - place.matches.append(match) + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") + rm = ResourceMatch(*pattern.split("/"), rename=rename) + if rm in place.matches: + await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Match 
{rm} already exists") + place.matches.append(rm) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceMatchResponse() @locked - async def del_place_match(self, placename, pattern, rename=None, details=None): + async def DeletePlaceMatch(self, request, context): + placename = request.placename + pattern = request.pattern + rename = request.rename if request.HasField("rename") else None try: place = self.places[placename] except KeyError: - return False - match = ResourceMatch(*pattern.split("/"), rename=rename) + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") + rm = ResourceMatch(*pattern.split("/"), rename=rename) try: - place.matches.remove(match) + place.matches.remove(rm) except ValueError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Match {rm} does not exist in {placename}") place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.DeletePlaceMatchResponse() async def _acquire_resources(self, place, resources): + assert self.lock.locked() + resources = resources.copy() # we may modify the list # all resources need to be free for resource in resources: @@ -554,12 +606,18 @@ async def _acquire_resources(self, place, resources): for resource in resources: # this triggers an update from the exporter which is published # to the clients - await self.call( - f"org.labgrid.exporter.{resource.path[0]}.acquire", resource.path[1], resource.path[3], place.name - ) + request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest() + request.group_name = resource.path[1] + request.resource_name = resource.path[3] + request.place_name = place.name + cmd = ExporterCommand(request) + self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd) + await cmd.wait() + if not cmd.response.success: + raise ExporterError("failed to acquire {resource}") acquired.append(resource) - except: - 
print(f"failed to acquire {resource}", file=sys.stderr) + except Exception: + logging.exception("failed to acquire %s", resource) # cleanup await self._release_resources(place, acquired) return False @@ -570,6 +628,8 @@ async def _acquire_resources(self, place, resources): return True async def _release_resources(self, place, resources, callback=True): + assert self.lock.locked() + resources = resources.copy() # we may modify the list for resource in resources: @@ -583,37 +643,48 @@ async def _release_resources(self, place, resources, callback=True): # this triggers an update from the exporter which is published # to the clients if callback: - await self.call( - f"org.labgrid.exporter.{resource.path[0]}.release", resource.path[1], resource.path[3] - ) - except: - print(f"failed to release {resource}", file=sys.stderr) + request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest() + request.group_name = resource.path[1] + request.resource_name = resource.path[3] + # request.place_name is left unset to indicate release + cmd = ExporterCommand(request) + self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd) + await cmd.wait() + if not cmd.response.success: + raise ExporterError(f"failed to release {resource}") + except (ExporterError, TimeoutError): + logging.exception("failed to release %s", resource) # at leaset try to notify the clients try: self._publish_resource(resource) except: - pass + logging.exception("failed to publish released resource %s", resource) @locked - async def acquire_place(self, name, details=None): - print(details) + async def AcquirePlace(self, request, context): + peer = context.peer() + name = request.placename + try: + username = self.clients[peer].name + except KeyError: + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Peer {peer} does not have a valid session") + print(request) + try: place = self.places[name] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {name} 
does not exist") if place.acquired: - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {name} is already acquired") if place.reservation: res = self.reservations[place.reservation] - if not res.owner == self.sessions[details.caller].name: - return False + if not res.owner == username: + await context.abort(grpc.StatusCode.PERMISSION_DENIED, f"Place {name} was not reserved for {username}") # FIXME use the session object instead? or something else which # survives disconnecting clients? - place.acquired = self.sessions[details.caller].name + place.acquired = username resources = [] - for _, session in sorted(self.sessions.items()): - if not isinstance(session, ExporterSession): - continue + for _, session in sorted(self.exporters.items()): for _, group in sorted(session.groups.items()): for _, resource in sorted(group.items()): if not place.hasmatch(resource.path): @@ -622,23 +693,29 @@ async def acquire_place(self, name, details=None): if not await self._acquire_resources(place, resources): # revert earlier change place.acquired = None - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Failed to acquire resources for place {name}") place.touch() self._publish_place(place) self.save_later() self.schedule_reservations() print(f"{place.name}: place acquired by {place.acquired}") - return True + return labgrid_coordinator_pb2.AcquirePlaceResponse() @locked - async def release_place(self, name, details=None): - print(details) + async def ReleasePlace(self, request, context): + name = request.placename + print(request) + fromuser = request.fromuser if request.HasField("fromuser") else None try: place = self.places[name] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {name} does not exist") if not place.acquired: - return False + if fromuser: + return labgrid_coordinator_pb2.ReleasePlaceResponse() + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {name} 
is not acquired") + if fromuser and place.acquired != fromuser: + return labgrid_coordinator_pb2.ReleasePlaceResponse() await self._release_resources(place, place.acquired_resources) @@ -649,66 +726,43 @@ async def release_place(self, name, details=None): self.save_later() self.schedule_reservations() print(f"{place.name}: place released") - return True + return labgrid_coordinator_pb2.ReleasePlaceResponse() @locked - async def release_place_from(self, name, acquired, details=None): - """ - Release a place, but only if acquired by a specific user - - Note that unlike the release_place API, this function returns True as - long as the specific place is not acquired by the specified user. This - may mean that the place was not acquired at all, is acquired by - another, or was released; which of these states cannot be inferred from - the return code. This is intentional as the purpose of the command is - to validate that the specified user no longer owns the place, and the - exact state is irrelevant as long as that condition is met. 
- - Returns: - bool: True if the user no longer owns the place, or False if there - was an error that prevented releasing the place - """ + async def AllowPlace(self, request, context): + placename = request.placename + user = request.user + peer = context.peer() try: - place = self.places[name] + username = self.clients[peer].name except KeyError: - return False - if not place.acquired: - return True - if place.acquired != acquired: - return True - - await self._release_resources(place, place.acquired_resources) - - place.acquired = None - place.allowed = set() - place.touch() - self._publish_place(place) - self.save_later() - self.schedule_reservations() - return True - - @locked - async def allow_place(self, name, user, details=None): + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Peer {peer} does not have a valid session") try: - place = self.places[name] + place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") if not place.acquired: - return False - if not place.acquired == self.sessions[details.caller].name: - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {placename} is not acquired") + if not place.acquired == username: + await context.abort( + grpc.StatusCode.FAILED_PRECONDITION, f"Place {placename} is not acquired by {username}" + ) place.allowed.add(user) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AllowPlaceResponse() def _get_places(self): return {k: v.asdict() for k, v in self.places.items()} @locked - async def get_places(self, details=None): - return self._get_places() + async def GetPlaces(self, unused_request, unused_context): + logging.debug("GetPlaces") + try: + return labgrid_coordinator_pb2.GetPlacesResponse(places=[x.as_pb2() for x in self.places.values()]) + except Exception: + logging.exception("error during get places") def 
schedule_reservations(self): # The primary information is stored in the reservations and the places @@ -816,54 +870,129 @@ def schedule_reservations(self): self._publish_place(place) @locked - async def create_reservation(self, spec, prio=0.0, details=None): - filter_ = {} - for pair in spec.split(): - try: - k, v = pair.split("=") - except ValueError: - return None - if not TAG_KEY.match(k): - return None - if not TAG_VAL.match(v): - return None - filter_[k] = v - - filters = {"main": filter_} # currently, only one group is implemented - - owner = self.sessions[details.caller].name - res = Reservation(owner=owner, prio=prio, filters=filters) + async def CreateReservation(self, request: labgrid_coordinator_pb2.CreateReservationRequest, context): + peer = context.peer() + + fltrs = {} + for name, fltr_pb in request.filters.items(): + if name != "main": + await context.abort( + grpc.StatusCode.UNIMPLEMENTED, "Reservations for multiple groups are not implemented yet" + ) + fltr = fltrs[name] = {} + for k, v in fltr_pb.filter.items(): + if not TAG_KEY.match(k): + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Key {k} is invalid") + if not TAG_VAL.match(v): + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Value {v} is invalid") + fltr[k] = v + + owner = self.clients[peer].name + res = Reservation(owner=owner, prio=request.prio, filters=fltrs) self.reservations[res.token] = res self.schedule_reservations() - return {res.token: res.asdict()} + return labgrid_coordinator_pb2.CreateReservationResponse(reservation=res.as_pb2()) @locked - async def cancel_reservation(self, token, details=None): - if not isinstance(token, str): - return False + async def CancelReservation(self, request: labgrid_coordinator_pb2.CancelReservationRequest, context): + token = request.token + if not isinstance(token, str) or not token: + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Invalid token {token}") if token not in self.reservations: - return False + await 
context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Reservation {token} does not exist") del self.reservations[token] self.schedule_reservations() - return True + return labgrid_coordinator_pb2.CancelReservationResponse() @locked - async def poll_reservation(self, token, details=None): + async def PollReservation(self, request: labgrid_coordinator_pb2.PollReservationRequest, context): + token = request.token try: res = self.reservations[token] except KeyError: - return None + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Reservation {token} does not exist") res.refresh() - return res.asdict() + return labgrid_coordinator_pb2.PollReservationResponse(reservation=res.as_pb2()) @locked - async def get_reservations(self, details=None): - return {k: v.asdict() for k, v in self.reservations.items()} + async def GetReservations(self, request: labgrid_coordinator_pb2.GetReservationsRequest, context): + reservations = [x.as_pb2() for x in self.reservations.values()] + return labgrid_coordinator_pb2.GetReservationsResponse(reservations=reservations) + + +async def serve(listen, cleanup) -> None: + server = grpc.aio.server( + options=[ + ("grpc.keepalive_time_ms", 30000), # Send keepalive ping every 30 seconds + ( + "grpc.keepalive_timeout_ms", + 10000, + ), # Wait 10 seconds for ping ack before considering the connection dead + ("grpc.http2.min_time_between_pings_ms", 15000), # Minimum amount of time between pings + ("grpc.http2.max_pings_without_data", 0), # Allow pings even without active streams + ("grpc.keepalive_permit_without_calls", 1), # Allow keepalive pings even when there are no calls + ], + ) + coordinator = Coordinator() + labgrid_coordinator_pb2_grpc.add_CoordinatorServicer_to_server(coordinator, server) + # enable reflection for use with grpcurl + reflection.enable_server_reflection( + ( + labgrid_coordinator_pb2.DESCRIPTOR.services_by_name["Coordinator"].full_name, + reflection.SERVICE_NAME, + ), + server, + ) + # optionally enable channelz for 
use with grpcdebug + try: + from grpc_channelz.v1 import channelz + + channelz.add_channelz_servicer(server) + logging.info("Enabled channelz support") + except ImportError: + logging.info("Module grpcio-channelz not available") + + server.add_insecure_port(listen) + logging.debug("Starting server") + await server.start() + + async def server_graceful_shutdown(): + logging.info("Starting graceful shutdown...") + # Shuts down the server with 0 seconds of grace period. During the + # grace period, the server won't accept new connections and allow + # existing RPCs to continue within the grace period. + await server.stop(5) + + cleanup.append(server_graceful_shutdown()) + logging.info("Coordinator ready") + await server.wait_for_termination() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "-l", + "--listen", + metavar="HOST:PORT", + type=str, + default="[::]:20408", + help="coordinator listening host and port", + ) + parser.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug mode") + + args = parser.parse_args() + + logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) + + loop = asyncio.get_event_loop() + cleanup = [] + loop.set_debug(True) + try: + loop.run_until_complete(serve(args.listen, cleanup)) + finally: + if cleanup: + loop.run_until_complete(*cleanup) if __name__ == "__main__": - runner = ApplicationRunner( - url=environ.get("WS", "ws://127.0.0.1:20408/ws"), - realm="realm1", - ) - runner.run(CoordinatorComponent) + main() diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index dde83bb7a..78b8ca606 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -7,7 +7,6 @@ import sys import os import os.path -import time import traceback import shutil import subprocess @@ -15,17 +14,16 @@ from pathlib import Path from typing import Dict, Type from socket import gethostname, getfqdn + import attr -from autobahn.asyncio.wamp import 
ApplicationRunner, ApplicationSession +import grpc from .config import ResourceConfig -from .common import ResourceEntry, enable_tcp_nodelay, monkey_patch_max_msg_payload_size_ws_option +from .common import ResourceEntry, queue_as_aiter +from .generated import labgrid_coordinator_pb2, labgrid_coordinator_pb2_grpc from ..util import get_free_port, labgrid_version -monkey_patch_max_msg_payload_size_ws_option() - -__version__ = labgrid_version() exports: Dict[str, Type[ResourceEntry]] = {} reexec = False @@ -112,10 +110,10 @@ def start(self): start_params = self._get_start_params() try: self._start(start_params) - except Exception: + except Exception as e: self.broken = "start failed" self.logger.exception("failed to start with %s", start_params) - raise + raise BrokenResourceError("Failed to start resource") from e self.start_params = start_params def stop(self): @@ -773,111 +771,158 @@ def _get_params(self): exports["YKUSHPowerPort"] = YKUSHPowerPortExport -class ExporterSession(ApplicationSession): - def onConnect(self): +class Exporter: + def __init__(self, config) -> None: """Set up internal datastructures on successful connection: - Setup loop, name, authid and address - Join the coordinator as an exporter""" - self.loop = self.config.extra["loop"] - self.name = self.config.extra["name"] - self.hostname = self.config.extra["hostname"] - self.isolated = self.config.extra["isolated"] - self.address = self._transport.transport.get_extra_info("sockname")[0] - self.checkpoint = time.monotonic() + self.config = config + self.loop = asyncio.get_event_loop() + self.name = config["name"] + self.hostname = config["hostname"] + self.isolated = config["isolated"] + + self.channel = grpc.aio.insecure_channel(config["coordinator"]) + self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) + self.out_queue = asyncio.Queue() + self.pump_task = None + self.poll_task = None self.groups = {} - enable_tcp_nodelay(self) - self.join( - self.config.realm, - 
authmethods=["anonymous", "ticket"], - authid=f"exporter/{self.name}", - authextra={"authid": f"exporter/{self.name}"}, - ) - - def onChallenge(self, challenge): - """Function invoked on received challege, returns just a dummy ticket - at the moment, authentication is not supported yet""" - logging.warning("Ticket authentication is deprecated. Please update your coordinator.") - return "dummy-ticket" - - async def onJoin(self, details): - """On successful join: - - export available resources - - bail out if we are unsuccessful - """ - print(details) - - prefix = f"org.labgrid.exporter.{self.name}" - try: - await self.register(self.acquire, f"{prefix}.acquire") - await self.register(self.release, f"{prefix}.release") - await self.register(self.version, f"{prefix}.version") - - config_template_env = { - "env": os.environ, - "isolated": self.isolated, - "hostname": self.hostname, - "name": self.name, - } - resource_config = ResourceConfig(self.config.extra["resources"], config_template_env) - for group_name, group in resource_config.data.items(): - group_name = str(group_name) - for resource_name, params in group.items(): - resource_name = str(resource_name) - if resource_name == "location": - continue - if params is None: - continue - cls = params.pop("cls", resource_name) - - # this may call back to acquire the resource immediately - await self.add_resource(group_name, resource_name, cls, params) - self.checkpoint = time.monotonic() - - except Exception: # pylint: disable=broad-except - traceback.print_exc(file=sys.stderr) - self.loop.stop() - return + async def run(self) -> None: + self.pump_task = self.loop.create_task(self.message_pump()) + self.send_started() + config_template_env = { + "env": os.environ, + "isolated": self.isolated, + "hostname": self.hostname, + "name": self.name, + } + resource_config = ResourceConfig(self.config["resources"], config_template_env) + for group_name, group in resource_config.data.items(): + group_name = str(group_name) + for 
resource_name, params in group.items(): + resource_name = str(resource_name) + if resource_name == "location": + continue + if params is None: + continue + cls = params.pop("cls", resource_name) + + # this may call back to acquire the resource immediately + await self.add_resource(group_name, resource_name, cls, params) + + # flush queued message + while not self.pump_task.done(): + try: + await asyncio.wait_for(self.out_queue.join(), timeout=1) + break + except asyncio.TimeoutError: + if self.pump_task.done(): + await self.pump_task + logging.debug("pump task exited, shutting down exporter") + return + + logging.info("creating poll task") self.poll_task = self.loop.create_task(self.poll()) - async def onLeave(self, details): - """Cleanup after leaving the coordinator connection""" - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - super().onLeave(details) - - async def onDisconnect(self): - print("connection lost", file=sys.stderr) - global reexec - reexec = True - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - await asyncio.sleep(0.5) # give others a chance to clean up - self.loop.stop() + (done, pending) = await asyncio.wait((self.pump_task, self.poll_task), return_when=asyncio.FIRST_COMPLETED) + logging.debug("task(s) %s exited, shutting down exporter", done) + for task in pending: + task.cancel() + + await self.pump_task + await self.poll_task + + def send_started(self): + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.startup.version = labgrid_version() + msg.startup.name = self.name + self.out_queue.put_nowait(msg) + + async def message_pump(self): + got_message = False + try: + async for out_message in self.stub.ExporterStream(queue_as_aiter(self.out_queue)): + got_message = True + logging.debug("received message %s", out_message) + kind = out_message.WhichOneof("kind") + if kind == "hello": + logging.info("connected to exporter version %s", out_message.hello.version) + 
elif kind == "set_acquired_request": + logging.debug("acquire request") + success = False + reason = None + try: + if out_message.set_acquired_request.place_name: + await self.acquire( + out_message.set_acquired_request.group_name, + out_message.set_acquired_request.resource_name, + out_message.set_acquired_request.place_name, + ) + else: + await self.release( + out_message.set_acquired_request.group_name, + out_message.set_acquired_request.resource_name, + ) + success = True + except BrokenResourceError as e: + reason = e.args[0] + finally: + in_message = labgrid_coordinator_pb2.ExporterInMessage() + in_message.response.success = success + if reason: + in_message.response.reason = reason + logging.debug("queuing %s", in_message) + self.out_queue.put_nowait(in_message) + logging.debug("queued %s", in_message) + else: + logging.debug("unknown request: %s", kind) + except grpc.aio.AioRpcError as e: + self.out_queue.put_nowait(None) # let the sender side exit gracefully + if e.code() == grpc.StatusCode.UNAVAILABLE: + if got_message: + logging.error("coordinator became unavailable: %s", e.details()) + else: + logging.error("coordinator is unavailable: %s", e.details()) + + global reexec + reexec = True + else: + logging.exception("unexpected grpc error in coordinator message pump task") + except Exception: + self.out_queue.put_nowait(None) # let the sender side exit gracefully + logging.exception("error in coordinator message pump") + + # only send command response when the other updates have left the queue + # perhaps with queue join/task_done + # this should be a command from the coordinator async def acquire(self, group_name, resource_name, place_name): - resource = self.groups[group_name][resource_name] + resource = self.groups.get(group_name, {}).get(resource_name) + if resource is None: + logging.error("acquire request for unknown resource %s/%s by %s", group_name, resource_name, place_name) + return + try: resource.acquire(place_name) finally: await 
self.update_resource(group_name, resource_name) async def release(self, group_name, resource_name): - resource = self.groups[group_name][resource_name] + resource = self.groups.get(group_name, {}).get(resource_name) + if resource is None: + logging.error("release request for unknown resource %s/%s", group_name, resource_name) + return + try: resource.release() finally: await self.update_resource(group_name, resource_name) - async def version(self): - self.checkpoint = time.monotonic() - return __version__ - async def _poll_step(self): for group_name, group in self.groups.items(): for resource_name, resource in group.items(): @@ -904,10 +949,6 @@ async def poll(self): break except Exception: # pylint: disable=broad-except traceback.print_exc(file=sys.stderr) - age = time.monotonic() - self.checkpoint - if age > 300: - print(f"missed checkpoint, exiting (last was {age} seconds ago)", file=sys.stderr) - self.disconnect() async def add_resource(self, group_name, resource_name, cls, params): """Add a resource to the exporter and update status on the coordinator""" @@ -934,20 +975,28 @@ async def add_resource(self, group_name, resource_name, cls, params): async def update_resource(self, group_name, resource_name): """Update status on the coordinator""" resource = self.groups[group_name][resource_name] - data = resource.asdict() - print(data) - await self.call("org.labgrid.coordinator.set_resource", group_name, resource_name, data) + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.resource.CopyFrom(resource.as_pb2()) + msg.resource.path.group_name = group_name + msg.resource.path.resource_name = resource_name + self.out_queue.put_nowait(msg) + logging.info("queued update for resource %s/%s", group_name, resource_name) + + +async def amain(config) -> bool: + exporter = Exporter(config) + await exporter.run() def main(): parser = argparse.ArgumentParser() parser.add_argument( - "-x", - "--crossbar", - metavar="URL", + "-c", + "--coordinator", + metavar="HOST:PORT", 
type=str, - default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), - help="crossbar websocket URL", + default=os.environ.get("LG_COORDINATOR", "127.0.0.1:20408"), + help="coordinator host and port", ) parser.add_argument( "-n", @@ -979,29 +1028,22 @@ def main(): args = parser.parse_args() - level = "debug" if args.debug else "info" + logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) - extra = { + config = { "name": args.name or gethostname(), "hostname": args.hostname or (getfqdn() if args.fqdn else gethostname()), "resources": args.resources, + "coordinator": args.coordinator, "isolated": args.isolated, } - crossbar_url = args.crossbar - crossbar_realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + print(f"exporter name: {config['name']}") + print(f"exporter hostname: {config['hostname']}") + print(f"resource config file: {config['resources']}") - print(f"crossbar URL: {crossbar_url}") - print(f"crossbar realm: {crossbar_realm}") - print(f"exporter name: {extra['name']}") - print(f"exporter hostname: {extra['hostname']}") - print(f"resource config file: {extra['resources']}") + asyncio.run(amain(config), debug=bool(args.debug)) - extra["loop"] = loop = asyncio.get_event_loop() - if args.debug: - loop.set_debug(True) - runner = ApplicationRunner(url=crossbar_url, realm=crossbar_realm, extra=extra) - runner.run(ExporterSession, log_level=level) if reexec: exit(100) diff --git a/labgrid/remote/generated/generate-proto.sh b/labgrid/remote/generated/generate-proto.sh new file mode 100755 index 000000000..d160b0c74 --- /dev/null +++ b/labgrid/remote/generated/generate-proto.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -ex +python3 -m grpc_tools.protoc -I../proto --python_out=. --pyi_out=. --grpc_python_out=. ../proto/labgrid-coordinator.proto +sed -i "s/import labgrid/from . 
import labgrid/g" labgrid_coordinator_pb2_grpc.py diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2.py b/labgrid/remote/generated/labgrid_coordinator_pb2.py new file mode 100644 index 000000000..37652bff7 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: labgrid-coordinator.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19labgrid-coordinator.proto\x12\x07labgrid\"\x8a\x01\n\x0f\x43lientInMessage\x12\x1d\n\x04sync\x18\x01 \x01(\x0b\x32\r.labgrid.SyncH\x00\x12\'\n\x07startup\x18\x02 \x01(\x0b\x32\x14.labgrid.StartupDoneH\x00\x12\'\n\tsubscribe\x18\x03 \x01(\x0b\x32\x12.labgrid.SubscribeH\x00\x42\x06\n\x04kind\"\x12\n\x04Sync\x12\n\n\x02id\x18\x01 \x01(\x04\",\n\x0bStartupDone\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"r\n\tSubscribe\x12\x1b\n\x0eis_unsubscribe\x18\x01 \x01(\x08H\x01\x88\x01\x01\x12\x14\n\nall_places\x18\x02 \x01(\x08H\x00\x12\x17\n\rall_resources\x18\x03 \x01(\x08H\x00\x42\x06\n\x04kindB\x11\n\x0f_is_unsubscribe\"g\n\x10\x43lientOutMessage\x12 \n\x04sync\x18\x01 \x01(\x0b\x32\r.labgrid.SyncH\x00\x88\x01\x01\x12(\n\x07updates\x18\x02 \x03(\x0b\x32\x17.labgrid.UpdateResponseB\x07\n\x05_sync\"\xa5\x01\n\x0eUpdateResponse\x12%\n\x08resource\x18\x01 \x01(\x0b\x32\x11.labgrid.ResourceH\x00\x12.\n\x0c\x64\x65l_resource\x18\x02 \x01(\x0b\x32\x16.labgrid.Resource.PathH\x00\x12\x1f\n\x05place\x18\x03 \x01(\x0b\x32\x0e.labgrid.PlaceH\x00\x12\x13\n\tdel_place\x18\x04 
\x01(\tH\x00\x42\x06\n\x04kind\"\x9a\x01\n\x11\x45xporterInMessage\x12%\n\x08resource\x18\x01 \x01(\x0b\x32\x11.labgrid.ResourceH\x00\x12\'\n\x07startup\x18\x02 \x01(\x0b\x32\x14.labgrid.StartupDoneH\x00\x12-\n\x08response\x18\x03 \x01(\x0b\x32\x19.labgrid.ExporterResponseH\x00\x42\x06\n\x04kind\"\x9e\x03\n\x08Resource\x12$\n\x04path\x18\x01 \x01(\x0b\x32\x16.labgrid.Resource.Path\x12\x0b\n\x03\x63ls\x18\x02 \x01(\t\x12-\n\x06params\x18\x03 \x03(\x0b\x32\x1d.labgrid.Resource.ParamsEntry\x12+\n\x05\x65xtra\x18\x04 \x03(\x0b\x32\x1c.labgrid.Resource.ExtraEntry\x12\x10\n\x08\x61\x63quired\x18\x05 \x01(\t\x12\r\n\x05\x61vail\x18\x06 \x01(\x08\x1a_\n\x04Path\x12\x1a\n\rexporter_name\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\ngroup_name\x18\x02 \x01(\t\x12\x15\n\rresource_name\x18\x03 \x01(\tB\x10\n\x0e_exporter_name\x1a@\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.labgrid.MapValue:\x02\x38\x01\x1a?\n\nExtraEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.labgrid.MapValue:\x02\x38\x01\"\x82\x01\n\x08MapValue\x12\x14\n\nbool_value\x18\x01 \x01(\x08H\x00\x12\x13\n\tint_value\x18\x02 \x01(\x03H\x00\x12\x14\n\nuint_value\x18\x03 \x01(\x04H\x00\x12\x15\n\x0b\x66loat_value\x18\x04 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x05 \x01(\tH\x00\x42\x06\n\x04kind\"C\n\x10\x45xporterResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x13\n\x06reason\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_reason\"\x18\n\x05Hello\x12\x0f\n\x07version\x18\x01 \x01(\t\"\x82\x01\n\x12\x45xporterOutMessage\x12\x1f\n\x05hello\x18\x01 \x01(\x0b\x32\x0e.labgrid.HelloH\x00\x12\x43\n\x14set_acquired_request\x18\x02 \x01(\x0b\x32#.labgrid.ExporterSetAcquiredRequestH\x00\x42\x06\n\x04kind\"o\n\x1a\x45xporterSetAcquiredRequest\x12\x12\n\ngroup_name\x18\x01 \x01(\t\x12\x15\n\rresource_name\x18\x02 \x01(\t\x12\x17\n\nplace_name\x18\x03 
\x01(\tH\x00\x88\x01\x01\x42\r\n\x0b_place_name\"\x1f\n\x0f\x41\x64\x64PlaceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x12\n\x10\x41\x64\x64PlaceResponse\"\"\n\x12\x44\x65letePlaceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x15\n\x13\x44\x65letePlaceResponse\"\x12\n\x10GetPlacesRequest\"3\n\x11GetPlacesResponse\x12\x1e\n\x06places\x18\x01 \x03(\x0b\x32\x0e.labgrid.Place\"\xd2\x02\n\x05Place\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x61liases\x18\x02 \x03(\t\x12\x0f\n\x07\x63omment\x18\x03 \x01(\t\x12&\n\x04tags\x18\x04 \x03(\x0b\x32\x18.labgrid.Place.TagsEntry\x12\'\n\x07matches\x18\x05 \x03(\x0b\x32\x16.labgrid.ResourceMatch\x12\x15\n\x08\x61\x63quired\x18\x06 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\x12\x61\x63quired_resources\x18\x07 \x03(\t\x12\x0f\n\x07\x61llowed\x18\x08 \x03(\t\x12\x0f\n\x07\x63reated\x18\t \x01(\x01\x12\x0f\n\x07\x63hanged\x18\n \x01(\x01\x12\x18\n\x0breservation\x18\x0b \x01(\tH\x01\x88\x01\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0b\n\t_acquiredB\x0e\n\x0c_reservation\"y\n\rResourceMatch\x12\x10\n\x08\x65xporter\x18\x01 \x01(\t\x12\r\n\x05group\x18\x02 \x01(\t\x12\x0b\n\x03\x63ls\x18\x03 \x01(\t\x12\x11\n\x04name\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06rename\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x07\n\x05_nameB\t\n\x07_rename\"8\n\x14\x41\x64\x64PlaceAliasRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\"\x17\n\x15\x41\x64\x64PlaceAliasResponse\";\n\x17\x44\x65letePlaceAliasRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\"\x1a\n\x18\x44\x65letePlaceAliasResponse\"\x8b\x01\n\x13SetPlaceTagsRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x34\n\x04tags\x18\x02 \x03(\x0b\x32&.labgrid.SetPlaceTagsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14SetPlaceTagsResponse\"<\n\x16SetPlaceCommentRequest\x12\x11\n\tplacename\x18\x01 
\x01(\t\x12\x0f\n\x07\x63omment\x18\x02 \x01(\t\"\x19\n\x17SetPlaceCommentResponse\"Z\n\x14\x41\x64\x64PlaceMatchRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\x13\n\x06rename\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_rename\"\x17\n\x15\x41\x64\x64PlaceMatchResponse\"]\n\x17\x44\x65letePlaceMatchRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\x13\n\x06rename\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_rename\"\x1a\n\x18\x44\x65letePlaceMatchResponse\"(\n\x13\x41\x63quirePlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\"\x16\n\x14\x41\x63quirePlaceResponse\"L\n\x13ReleasePlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x15\n\x08\x66romuser\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0b\n\t_fromuser\"\x16\n\x14ReleasePlaceResponse\"4\n\x11\x41llowPlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\"\x14\n\x12\x41llowPlaceResponse\"\xb6\x01\n\x18\x43reateReservationRequest\x12?\n\x07\x66ilters\x18\x01 \x03(\x0b\x32..labgrid.CreateReservationRequest.FiltersEntry\x12\x0c\n\x04prio\x18\x02 \x01(\x01\x1aK\n\x0c\x46iltersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.labgrid.Reservation.Filter:\x02\x38\x01\"F\n\x19\x43reateReservationResponse\x12)\n\x0breservation\x18\x01 \x01(\x0b\x32\x14.labgrid.Reservation\"\xcd\x03\n\x0bReservation\x12\r\n\x05owner\x18\x01 \x01(\t\x12\r\n\x05token\x18\x02 \x01(\t\x12\r\n\x05state\x18\x03 \x01(\x05\x12\x0c\n\x04prio\x18\x04 \x01(\x01\x12\x32\n\x07\x66ilters\x18\x05 \x03(\x0b\x32!.labgrid.Reservation.FiltersEntry\x12:\n\x0b\x61llocations\x18\x06 \x03(\x0b\x32%.labgrid.Reservation.AllocationsEntry\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x01\x12\x0f\n\x07timeout\x18\x08 \x01(\x01\x1ap\n\x06\x46ilter\x12\x37\n\x06\x66ilter\x18\x01 \x03(\x0b\x32\'.labgrid.Reservation.Filter.FilterEntry\x1a-\n\x0b\x46ilterEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1aK\n\x0c\x46iltersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.labgrid.Reservation.Filter:\x02\x38\x01\x1a\x32\n\x10\x41llocationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\")\n\x18\x43\x61ncelReservationRequest\x12\r\n\x05token\x18\x01 \x01(\t\"\x1b\n\x19\x43\x61ncelReservationResponse\"\'\n\x16PollReservationRequest\x12\r\n\x05token\x18\x01 \x01(\t\"D\n\x17PollReservationResponse\x12)\n\x0breservation\x18\x01 \x01(\x0b\x32\x14.labgrid.Reservation\"E\n\x17GetReservationsResponse\x12*\n\x0creservations\x18\x01 \x03(\x0b\x32\x14.labgrid.Reservation\"\x18\n\x16GetReservationsRequest2\xd2\x0b\n\x0b\x43oordinator\x12I\n\x0c\x43lientStream\x12\x18.labgrid.ClientInMessage\x1a\x19.labgrid.ClientOutMessage\"\x00(\x01\x30\x01\x12O\n\x0e\x45xporterStream\x12\x1a.labgrid.ExporterInMessage\x1a\x1b.labgrid.ExporterOutMessage\"\x00(\x01\x30\x01\x12\x41\n\x08\x41\x64\x64Place\x12\x18.labgrid.AddPlaceRequest\x1a\x19.labgrid.AddPlaceResponse\"\x00\x12J\n\x0b\x44\x65letePlace\x12\x1b.labgrid.DeletePlaceRequest\x1a\x1c.labgrid.DeletePlaceResponse\"\x00\x12\x44\n\tGetPlaces\x12\x19.labgrid.GetPlacesRequest\x1a\x1a.labgrid.GetPlacesResponse\"\x00\x12P\n\rAddPlaceAlias\x12\x1d.labgrid.AddPlaceAliasRequest\x1a\x1e.labgrid.AddPlaceAliasResponse\"\x00\x12Y\n\x10\x44\x65letePlaceAlias\x12 .labgrid.DeletePlaceAliasRequest\x1a!.labgrid.DeletePlaceAliasResponse\"\x00\x12M\n\x0cSetPlaceTags\x12\x1c.labgrid.SetPlaceTagsRequest\x1a\x1d.labgrid.SetPlaceTagsResponse\"\x00\x12V\n\x0fSetPlaceComment\x12\x1f.labgrid.SetPlaceCommentRequest\x1a .labgrid.SetPlaceCommentResponse\"\x00\x12P\n\rAddPlaceMatch\x12\x1d.labgrid.AddPlaceMatchRequest\x1a\x1e.labgrid.AddPlaceMatchResponse\"\x00\x12Y\n\x10\x44\x65letePlaceMatch\x12 
.labgrid.DeletePlaceMatchRequest\x1a!.labgrid.DeletePlaceMatchResponse\"\x00\x12M\n\x0c\x41\x63quirePlace\x12\x1c.labgrid.AcquirePlaceRequest\x1a\x1d.labgrid.AcquirePlaceResponse\"\x00\x12M\n\x0cReleasePlace\x12\x1c.labgrid.ReleasePlaceRequest\x1a\x1d.labgrid.ReleasePlaceResponse\"\x00\x12G\n\nAllowPlace\x12\x1a.labgrid.AllowPlaceRequest\x1a\x1b.labgrid.AllowPlaceResponse\"\x00\x12\\\n\x11\x43reateReservation\x12!.labgrid.CreateReservationRequest\x1a\".labgrid.CreateReservationResponse\"\x00\x12\\\n\x11\x43\x61ncelReservation\x12!.labgrid.CancelReservationRequest\x1a\".labgrid.CancelReservationResponse\"\x00\x12V\n\x0fPollReservation\x12\x1f.labgrid.PollReservationRequest\x1a .labgrid.PollReservationResponse\"\x00\x12V\n\x0fGetReservations\x12\x1f.labgrid.GetReservationsRequest\x1a .labgrid.GetReservationsResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'labgrid_coordinator_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_RESOURCE_PARAMSENTRY']._options = None + _globals['_RESOURCE_PARAMSENTRY']._serialized_options = b'8\001' + _globals['_RESOURCE_EXTRAENTRY']._options = None + _globals['_RESOURCE_EXTRAENTRY']._serialized_options = b'8\001' + _globals['_PLACE_TAGSENTRY']._options = None + _globals['_PLACE_TAGSENTRY']._serialized_options = b'8\001' + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._options = None + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_options = b'8\001' + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._options = None + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_FILTER_FILTERENTRY']._options = None + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_FILTERSENTRY']._options = None + _globals['_RESERVATION_FILTERSENTRY']._serialized_options = b'8\001' 
+ _globals['_RESERVATION_ALLOCATIONSENTRY']._options = None + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_options = b'8\001' + _globals['_CLIENTINMESSAGE']._serialized_start=39 + _globals['_CLIENTINMESSAGE']._serialized_end=177 + _globals['_SYNC']._serialized_start=179 + _globals['_SYNC']._serialized_end=197 + _globals['_STARTUPDONE']._serialized_start=199 + _globals['_STARTUPDONE']._serialized_end=243 + _globals['_SUBSCRIBE']._serialized_start=245 + _globals['_SUBSCRIBE']._serialized_end=359 + _globals['_CLIENTOUTMESSAGE']._serialized_start=361 + _globals['_CLIENTOUTMESSAGE']._serialized_end=464 + _globals['_UPDATERESPONSE']._serialized_start=467 + _globals['_UPDATERESPONSE']._serialized_end=632 + _globals['_EXPORTERINMESSAGE']._serialized_start=635 + _globals['_EXPORTERINMESSAGE']._serialized_end=789 + _globals['_RESOURCE']._serialized_start=792 + _globals['_RESOURCE']._serialized_end=1206 + _globals['_RESOURCE_PATH']._serialized_start=980 + _globals['_RESOURCE_PATH']._serialized_end=1075 + _globals['_RESOURCE_PARAMSENTRY']._serialized_start=1077 + _globals['_RESOURCE_PARAMSENTRY']._serialized_end=1141 + _globals['_RESOURCE_EXTRAENTRY']._serialized_start=1143 + _globals['_RESOURCE_EXTRAENTRY']._serialized_end=1206 + _globals['_MAPVALUE']._serialized_start=1209 + _globals['_MAPVALUE']._serialized_end=1339 + _globals['_EXPORTERRESPONSE']._serialized_start=1341 + _globals['_EXPORTERRESPONSE']._serialized_end=1408 + _globals['_HELLO']._serialized_start=1410 + _globals['_HELLO']._serialized_end=1434 + _globals['_EXPORTEROUTMESSAGE']._serialized_start=1437 + _globals['_EXPORTEROUTMESSAGE']._serialized_end=1567 + _globals['_EXPORTERSETACQUIREDREQUEST']._serialized_start=1569 + _globals['_EXPORTERSETACQUIREDREQUEST']._serialized_end=1680 + _globals['_ADDPLACEREQUEST']._serialized_start=1682 + _globals['_ADDPLACEREQUEST']._serialized_end=1713 + _globals['_ADDPLACERESPONSE']._serialized_start=1715 + _globals['_ADDPLACERESPONSE']._serialized_end=1733 + 
_globals['_DELETEPLACEREQUEST']._serialized_start=1735 + _globals['_DELETEPLACEREQUEST']._serialized_end=1769 + _globals['_DELETEPLACERESPONSE']._serialized_start=1771 + _globals['_DELETEPLACERESPONSE']._serialized_end=1792 + _globals['_GETPLACESREQUEST']._serialized_start=1794 + _globals['_GETPLACESREQUEST']._serialized_end=1812 + _globals['_GETPLACESRESPONSE']._serialized_start=1814 + _globals['_GETPLACESRESPONSE']._serialized_end=1865 + _globals['_PLACE']._serialized_start=1868 + _globals['_PLACE']._serialized_end=2206 + _globals['_PLACE_TAGSENTRY']._serialized_start=2134 + _globals['_PLACE_TAGSENTRY']._serialized_end=2177 + _globals['_RESOURCEMATCH']._serialized_start=2208 + _globals['_RESOURCEMATCH']._serialized_end=2329 + _globals['_ADDPLACEALIASREQUEST']._serialized_start=2331 + _globals['_ADDPLACEALIASREQUEST']._serialized_end=2387 + _globals['_ADDPLACEALIASRESPONSE']._serialized_start=2389 + _globals['_ADDPLACEALIASRESPONSE']._serialized_end=2412 + _globals['_DELETEPLACEALIASREQUEST']._serialized_start=2414 + _globals['_DELETEPLACEALIASREQUEST']._serialized_end=2473 + _globals['_DELETEPLACEALIASRESPONSE']._serialized_start=2475 + _globals['_DELETEPLACEALIASRESPONSE']._serialized_end=2501 + _globals['_SETPLACETAGSREQUEST']._serialized_start=2504 + _globals['_SETPLACETAGSREQUEST']._serialized_end=2643 + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_start=2134 + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_end=2177 + _globals['_SETPLACETAGSRESPONSE']._serialized_start=2645 + _globals['_SETPLACETAGSRESPONSE']._serialized_end=2667 + _globals['_SETPLACECOMMENTREQUEST']._serialized_start=2669 + _globals['_SETPLACECOMMENTREQUEST']._serialized_end=2729 + _globals['_SETPLACECOMMENTRESPONSE']._serialized_start=2731 + _globals['_SETPLACECOMMENTRESPONSE']._serialized_end=2756 + _globals['_ADDPLACEMATCHREQUEST']._serialized_start=2758 + _globals['_ADDPLACEMATCHREQUEST']._serialized_end=2848 + _globals['_ADDPLACEMATCHRESPONSE']._serialized_start=2850 + 
_globals['_ADDPLACEMATCHRESPONSE']._serialized_end=2873 + _globals['_DELETEPLACEMATCHREQUEST']._serialized_start=2875 + _globals['_DELETEPLACEMATCHREQUEST']._serialized_end=2968 + _globals['_DELETEPLACEMATCHRESPONSE']._serialized_start=2970 + _globals['_DELETEPLACEMATCHRESPONSE']._serialized_end=2996 + _globals['_ACQUIREPLACEREQUEST']._serialized_start=2998 + _globals['_ACQUIREPLACEREQUEST']._serialized_end=3038 + _globals['_ACQUIREPLACERESPONSE']._serialized_start=3040 + _globals['_ACQUIREPLACERESPONSE']._serialized_end=3062 + _globals['_RELEASEPLACEREQUEST']._serialized_start=3064 + _globals['_RELEASEPLACEREQUEST']._serialized_end=3140 + _globals['_RELEASEPLACERESPONSE']._serialized_start=3142 + _globals['_RELEASEPLACERESPONSE']._serialized_end=3164 + _globals['_ALLOWPLACEREQUEST']._serialized_start=3166 + _globals['_ALLOWPLACEREQUEST']._serialized_end=3218 + _globals['_ALLOWPLACERESPONSE']._serialized_start=3220 + _globals['_ALLOWPLACERESPONSE']._serialized_end=3240 + _globals['_CREATERESERVATIONREQUEST']._serialized_start=3243 + _globals['_CREATERESERVATIONREQUEST']._serialized_end=3425 + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_start=3350 + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_end=3425 + _globals['_CREATERESERVATIONRESPONSE']._serialized_start=3427 + _globals['_CREATERESERVATIONRESPONSE']._serialized_end=3497 + _globals['_RESERVATION']._serialized_start=3500 + _globals['_RESERVATION']._serialized_end=3961 + _globals['_RESERVATION_FILTER']._serialized_start=3720 + _globals['_RESERVATION_FILTER']._serialized_end=3832 + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_start=3787 + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_end=3832 + _globals['_RESERVATION_FILTERSENTRY']._serialized_start=3350 + _globals['_RESERVATION_FILTERSENTRY']._serialized_end=3425 + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_start=3911 + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_end=3961 + 
_globals['_CANCELRESERVATIONREQUEST']._serialized_start=3963 + _globals['_CANCELRESERVATIONREQUEST']._serialized_end=4004 + _globals['_CANCELRESERVATIONRESPONSE']._serialized_start=4006 + _globals['_CANCELRESERVATIONRESPONSE']._serialized_end=4033 + _globals['_POLLRESERVATIONREQUEST']._serialized_start=4035 + _globals['_POLLRESERVATIONREQUEST']._serialized_end=4074 + _globals['_POLLRESERVATIONRESPONSE']._serialized_start=4076 + _globals['_POLLRESERVATIONRESPONSE']._serialized_end=4144 + _globals['_GETRESERVATIONSRESPONSE']._serialized_start=4146 + _globals['_GETRESERVATIONSRESPONSE']._serialized_end=4215 + _globals['_GETRESERVATIONSREQUEST']._serialized_start=4217 + _globals['_GETRESERVATIONSREQUEST']._serialized_end=4241 + _globals['_COORDINATOR']._serialized_start=4244 + _globals['_COORDINATOR']._serialized_end=5734 +# @@protoc_insertion_point(module_scope) diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2.pyi b/labgrid/remote/generated/labgrid_coordinator_pb2.pyi new file mode 100644 index 000000000..366f4e438 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2.pyi @@ -0,0 +1,448 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class ClientInMessage(_message.Message): + __slots__ = ("sync", "startup", "subscribe") + SYNC_FIELD_NUMBER: _ClassVar[int] + STARTUP_FIELD_NUMBER: _ClassVar[int] + SUBSCRIBE_FIELD_NUMBER: _ClassVar[int] + sync: Sync + startup: StartupDone + subscribe: Subscribe + def __init__(self, sync: _Optional[_Union[Sync, _Mapping]] = ..., startup: _Optional[_Union[StartupDone, _Mapping]] = ..., subscribe: _Optional[_Union[Subscribe, _Mapping]] = ...) -> None: ... 
+ +class Sync(_message.Message): + __slots__ = ("id",) + ID_FIELD_NUMBER: _ClassVar[int] + id: int + def __init__(self, id: _Optional[int] = ...) -> None: ... + +class StartupDone(_message.Message): + __slots__ = ("version", "name") + VERSION_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + version: str + name: str + def __init__(self, version: _Optional[str] = ..., name: _Optional[str] = ...) -> None: ... + +class Subscribe(_message.Message): + __slots__ = ("is_unsubscribe", "all_places", "all_resources") + IS_UNSUBSCRIBE_FIELD_NUMBER: _ClassVar[int] + ALL_PLACES_FIELD_NUMBER: _ClassVar[int] + ALL_RESOURCES_FIELD_NUMBER: _ClassVar[int] + is_unsubscribe: bool + all_places: bool + all_resources: bool + def __init__(self, is_unsubscribe: bool = ..., all_places: bool = ..., all_resources: bool = ...) -> None: ... + +class ClientOutMessage(_message.Message): + __slots__ = ("sync", "updates") + SYNC_FIELD_NUMBER: _ClassVar[int] + UPDATES_FIELD_NUMBER: _ClassVar[int] + sync: Sync + updates: _containers.RepeatedCompositeFieldContainer[UpdateResponse] + def __init__(self, sync: _Optional[_Union[Sync, _Mapping]] = ..., updates: _Optional[_Iterable[_Union[UpdateResponse, _Mapping]]] = ...) -> None: ... + +class UpdateResponse(_message.Message): + __slots__ = ("resource", "del_resource", "place", "del_place") + RESOURCE_FIELD_NUMBER: _ClassVar[int] + DEL_RESOURCE_FIELD_NUMBER: _ClassVar[int] + PLACE_FIELD_NUMBER: _ClassVar[int] + DEL_PLACE_FIELD_NUMBER: _ClassVar[int] + resource: Resource + del_resource: Resource.Path + place: Place + del_place: str + def __init__(self, resource: _Optional[_Union[Resource, _Mapping]] = ..., del_resource: _Optional[_Union[Resource.Path, _Mapping]] = ..., place: _Optional[_Union[Place, _Mapping]] = ..., del_place: _Optional[str] = ...) -> None: ... 
+ +class ExporterInMessage(_message.Message): + __slots__ = ("resource", "startup", "response") + RESOURCE_FIELD_NUMBER: _ClassVar[int] + STARTUP_FIELD_NUMBER: _ClassVar[int] + RESPONSE_FIELD_NUMBER: _ClassVar[int] + resource: Resource + startup: StartupDone + response: ExporterResponse + def __init__(self, resource: _Optional[_Union[Resource, _Mapping]] = ..., startup: _Optional[_Union[StartupDone, _Mapping]] = ..., response: _Optional[_Union[ExporterResponse, _Mapping]] = ...) -> None: ... + +class Resource(_message.Message): + __slots__ = ("path", "cls", "params", "extra", "acquired", "avail") + class Path(_message.Message): + __slots__ = ("exporter_name", "group_name", "resource_name") + EXPORTER_NAME_FIELD_NUMBER: _ClassVar[int] + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + RESOURCE_NAME_FIELD_NUMBER: _ClassVar[int] + exporter_name: str + group_name: str + resource_name: str + def __init__(self, exporter_name: _Optional[str] = ..., group_name: _Optional[str] = ..., resource_name: _Optional[str] = ...) -> None: ... + class ParamsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: MapValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[MapValue, _Mapping]] = ...) -> None: ... + class ExtraEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: MapValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[MapValue, _Mapping]] = ...) -> None: ... 
+ PATH_FIELD_NUMBER: _ClassVar[int] + CLS_FIELD_NUMBER: _ClassVar[int] + PARAMS_FIELD_NUMBER: _ClassVar[int] + EXTRA_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_FIELD_NUMBER: _ClassVar[int] + AVAIL_FIELD_NUMBER: _ClassVar[int] + path: Resource.Path + cls: str + params: _containers.MessageMap[str, MapValue] + extra: _containers.MessageMap[str, MapValue] + acquired: str + avail: bool + def __init__(self, path: _Optional[_Union[Resource.Path, _Mapping]] = ..., cls: _Optional[str] = ..., params: _Optional[_Mapping[str, MapValue]] = ..., extra: _Optional[_Mapping[str, MapValue]] = ..., acquired: _Optional[str] = ..., avail: bool = ...) -> None: ... + +class MapValue(_message.Message): + __slots__ = ("bool_value", "int_value", "uint_value", "float_value", "string_value") + BOOL_VALUE_FIELD_NUMBER: _ClassVar[int] + INT_VALUE_FIELD_NUMBER: _ClassVar[int] + UINT_VALUE_FIELD_NUMBER: _ClassVar[int] + FLOAT_VALUE_FIELD_NUMBER: _ClassVar[int] + STRING_VALUE_FIELD_NUMBER: _ClassVar[int] + bool_value: bool + int_value: int + uint_value: int + float_value: float + string_value: str + def __init__(self, bool_value: bool = ..., int_value: _Optional[int] = ..., uint_value: _Optional[int] = ..., float_value: _Optional[float] = ..., string_value: _Optional[str] = ...) -> None: ... + +class ExporterResponse(_message.Message): + __slots__ = ("success", "reason") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + REASON_FIELD_NUMBER: _ClassVar[int] + success: bool + reason: str + def __init__(self, success: bool = ..., reason: _Optional[str] = ...) -> None: ... + +class Hello(_message.Message): + __slots__ = ("version",) + VERSION_FIELD_NUMBER: _ClassVar[int] + version: str + def __init__(self, version: _Optional[str] = ...) -> None: ... 
+ +class ExporterOutMessage(_message.Message): + __slots__ = ("hello", "set_acquired_request") + HELLO_FIELD_NUMBER: _ClassVar[int] + SET_ACQUIRED_REQUEST_FIELD_NUMBER: _ClassVar[int] + hello: Hello + set_acquired_request: ExporterSetAcquiredRequest + def __init__(self, hello: _Optional[_Union[Hello, _Mapping]] = ..., set_acquired_request: _Optional[_Union[ExporterSetAcquiredRequest, _Mapping]] = ...) -> None: ... + +class ExporterSetAcquiredRequest(_message.Message): + __slots__ = ("group_name", "resource_name", "place_name") + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + RESOURCE_NAME_FIELD_NUMBER: _ClassVar[int] + PLACE_NAME_FIELD_NUMBER: _ClassVar[int] + group_name: str + resource_name: str + place_name: str + def __init__(self, group_name: _Optional[str] = ..., resource_name: _Optional[str] = ..., place_name: _Optional[str] = ...) -> None: ... + +class AddPlaceRequest(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... + +class AddPlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceRequest(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... + +class DeletePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetPlacesRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetPlacesResponse(_message.Message): + __slots__ = ("places",) + PLACES_FIELD_NUMBER: _ClassVar[int] + places: _containers.RepeatedCompositeFieldContainer[Place] + def __init__(self, places: _Optional[_Iterable[_Union[Place, _Mapping]]] = ...) -> None: ... 
+ +class Place(_message.Message): + __slots__ = ("name", "aliases", "comment", "tags", "matches", "acquired", "acquired_resources", "allowed", "created", "changed", "reservation") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + NAME_FIELD_NUMBER: _ClassVar[int] + ALIASES_FIELD_NUMBER: _ClassVar[int] + COMMENT_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + MATCHES_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_RESOURCES_FIELD_NUMBER: _ClassVar[int] + ALLOWED_FIELD_NUMBER: _ClassVar[int] + CREATED_FIELD_NUMBER: _ClassVar[int] + CHANGED_FIELD_NUMBER: _ClassVar[int] + RESERVATION_FIELD_NUMBER: _ClassVar[int] + name: str + aliases: _containers.RepeatedScalarFieldContainer[str] + comment: str + tags: _containers.ScalarMap[str, str] + matches: _containers.RepeatedCompositeFieldContainer[ResourceMatch] + acquired: str + acquired_resources: _containers.RepeatedScalarFieldContainer[str] + allowed: _containers.RepeatedScalarFieldContainer[str] + created: float + changed: float + reservation: str + def __init__(self, name: _Optional[str] = ..., aliases: _Optional[_Iterable[str]] = ..., comment: _Optional[str] = ..., tags: _Optional[_Mapping[str, str]] = ..., matches: _Optional[_Iterable[_Union[ResourceMatch, _Mapping]]] = ..., acquired: _Optional[str] = ..., acquired_resources: _Optional[_Iterable[str]] = ..., allowed: _Optional[_Iterable[str]] = ..., created: _Optional[float] = ..., changed: _Optional[float] = ..., reservation: _Optional[str] = ...) -> None: ... 
+ +class ResourceMatch(_message.Message): + __slots__ = ("exporter", "group", "cls", "name", "rename") + EXPORTER_FIELD_NUMBER: _ClassVar[int] + GROUP_FIELD_NUMBER: _ClassVar[int] + CLS_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + exporter: str + group: str + cls: str + name: str + rename: str + def __init__(self, exporter: _Optional[str] = ..., group: _Optional[str] = ..., cls: _Optional[str] = ..., name: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class AddPlaceAliasRequest(_message.Message): + __slots__ = ("placename", "alias") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + ALIAS_FIELD_NUMBER: _ClassVar[int] + placename: str + alias: str + def __init__(self, placename: _Optional[str] = ..., alias: _Optional[str] = ...) -> None: ... + +class AddPlaceAliasResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceAliasRequest(_message.Message): + __slots__ = ("placename", "alias") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + ALIAS_FIELD_NUMBER: _ClassVar[int] + placename: str + alias: str + def __init__(self, placename: _Optional[str] = ..., alias: _Optional[str] = ...) -> None: ... + +class DeletePlaceAliasResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class SetPlaceTagsRequest(_message.Message): + __slots__ = ("placename", "tags") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + PLACENAME_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + placename: str + tags: _containers.ScalarMap[str, str] + def __init__(self, placename: _Optional[str] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... 
+ +class SetPlaceTagsResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class SetPlaceCommentRequest(_message.Message): + __slots__ = ("placename", "comment") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + COMMENT_FIELD_NUMBER: _ClassVar[int] + placename: str + comment: str + def __init__(self, placename: _Optional[str] = ..., comment: _Optional[str] = ...) -> None: ... + +class SetPlaceCommentResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AddPlaceMatchRequest(_message.Message): + __slots__ = ("placename", "pattern", "rename") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + PATTERN_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + pattern: str + rename: str + def __init__(self, placename: _Optional[str] = ..., pattern: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class AddPlaceMatchResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceMatchRequest(_message.Message): + __slots__ = ("placename", "pattern", "rename") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + PATTERN_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + pattern: str + rename: str + def __init__(self, placename: _Optional[str] = ..., pattern: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class DeletePlaceMatchResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AcquirePlaceRequest(_message.Message): + __slots__ = ("placename",) + PLACENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + def __init__(self, placename: _Optional[str] = ...) -> None: ... + +class AcquirePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... 
+ +class ReleasePlaceRequest(_message.Message): + __slots__ = ("placename", "fromuser") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + FROMUSER_FIELD_NUMBER: _ClassVar[int] + placename: str + fromuser: str + def __init__(self, placename: _Optional[str] = ..., fromuser: _Optional[str] = ...) -> None: ... + +class ReleasePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AllowPlaceRequest(_message.Message): + __slots__ = ("placename", "user") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + USER_FIELD_NUMBER: _ClassVar[int] + placename: str + user: str + def __init__(self, placename: _Optional[str] = ..., user: _Optional[str] = ...) -> None: ... + +class AllowPlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class CreateReservationRequest(_message.Message): + __slots__ = ("filters", "prio") + class FiltersEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: Reservation.Filter + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[Reservation.Filter, _Mapping]] = ...) -> None: ... + FILTERS_FIELD_NUMBER: _ClassVar[int] + PRIO_FIELD_NUMBER: _ClassVar[int] + filters: _containers.MessageMap[str, Reservation.Filter] + prio: float + def __init__(self, filters: _Optional[_Mapping[str, Reservation.Filter]] = ..., prio: _Optional[float] = ...) -> None: ... + +class CreateReservationResponse(_message.Message): + __slots__ = ("reservation",) + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... 
+ +class Reservation(_message.Message): + __slots__ = ("owner", "token", "state", "prio", "filters", "allocations", "created", "timeout") + class Filter(_message.Message): + __slots__ = ("filter",) + class FilterEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + FILTER_FIELD_NUMBER: _ClassVar[int] + filter: _containers.ScalarMap[str, str] + def __init__(self, filter: _Optional[_Mapping[str, str]] = ...) -> None: ... + class FiltersEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: Reservation.Filter + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[Reservation.Filter, _Mapping]] = ...) -> None: ... + class AllocationsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + OWNER_FIELD_NUMBER: _ClassVar[int] + TOKEN_FIELD_NUMBER: _ClassVar[int] + STATE_FIELD_NUMBER: _ClassVar[int] + PRIO_FIELD_NUMBER: _ClassVar[int] + FILTERS_FIELD_NUMBER: _ClassVar[int] + ALLOCATIONS_FIELD_NUMBER: _ClassVar[int] + CREATED_FIELD_NUMBER: _ClassVar[int] + TIMEOUT_FIELD_NUMBER: _ClassVar[int] + owner: str + token: str + state: int + prio: float + filters: _containers.MessageMap[str, Reservation.Filter] + allocations: _containers.ScalarMap[str, str] + created: float + timeout: float + def __init__(self, owner: _Optional[str] = ..., token: _Optional[str] = ..., state: _Optional[int] = ..., prio: _Optional[float] = ..., filters: _Optional[_Mapping[str, Reservation.Filter]] = ..., allocations: _Optional[_Mapping[str, str]] = ..., created: _Optional[float] = ..., timeout: _Optional[float] = ...) 
-> None: ... + +class CancelReservationRequest(_message.Message): + __slots__ = ("token",) + TOKEN_FIELD_NUMBER: _ClassVar[int] + token: str + def __init__(self, token: _Optional[str] = ...) -> None: ... + +class CancelReservationResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class PollReservationRequest(_message.Message): + __slots__ = ("token",) + TOKEN_FIELD_NUMBER: _ClassVar[int] + token: str + def __init__(self, token: _Optional[str] = ...) -> None: ... + +class PollReservationResponse(_message.Message): + __slots__ = ("reservation",) + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... + +class GetReservationsResponse(_message.Message): + __slots__ = ("reservations",) + RESERVATIONS_FIELD_NUMBER: _ClassVar[int] + reservations: _containers.RepeatedCompositeFieldContainer[Reservation] + def __init__(self, reservations: _Optional[_Iterable[_Union[Reservation, _Mapping]]] = ...) -> None: ... + +class GetReservationsRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py b/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py new file mode 100644 index 000000000..debfb24f2 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py @@ -0,0 +1,627 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from . import labgrid_coordinator_pb2 as labgrid__coordinator__pb2 + + +class CoordinatorStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ClientStream = channel.stream_stream( + '/labgrid.Coordinator/ClientStream', + request_serializer=labgrid__coordinator__pb2.ClientInMessage.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ClientOutMessage.FromString, + ) + self.ExporterStream = channel.stream_stream( + '/labgrid.Coordinator/ExporterStream', + request_serializer=labgrid__coordinator__pb2.ExporterInMessage.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ExporterOutMessage.FromString, + ) + self.AddPlace = channel.unary_unary( + '/labgrid.Coordinator/AddPlace', + request_serializer=labgrid__coordinator__pb2.AddPlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceResponse.FromString, + ) + self.DeletePlace = channel.unary_unary( + '/labgrid.Coordinator/DeletePlace', + request_serializer=labgrid__coordinator__pb2.DeletePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceResponse.FromString, + ) + self.GetPlaces = channel.unary_unary( + '/labgrid.Coordinator/GetPlaces', + request_serializer=labgrid__coordinator__pb2.GetPlacesRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.GetPlacesResponse.FromString, + ) + self.AddPlaceAlias = channel.unary_unary( + '/labgrid.Coordinator/AddPlaceAlias', + request_serializer=labgrid__coordinator__pb2.AddPlaceAliasRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceAliasResponse.FromString, + ) + self.DeletePlaceAlias = channel.unary_unary( + '/labgrid.Coordinator/DeletePlaceAlias', + request_serializer=labgrid__coordinator__pb2.DeletePlaceAliasRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceAliasResponse.FromString, + ) + self.SetPlaceTags = channel.unary_unary( + '/labgrid.Coordinator/SetPlaceTags', + request_serializer=labgrid__coordinator__pb2.SetPlaceTagsRequest.SerializeToString, + 
response_deserializer=labgrid__coordinator__pb2.SetPlaceTagsResponse.FromString, + ) + self.SetPlaceComment = channel.unary_unary( + '/labgrid.Coordinator/SetPlaceComment', + request_serializer=labgrid__coordinator__pb2.SetPlaceCommentRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.SetPlaceCommentResponse.FromString, + ) + self.AddPlaceMatch = channel.unary_unary( + '/labgrid.Coordinator/AddPlaceMatch', + request_serializer=labgrid__coordinator__pb2.AddPlaceMatchRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceMatchResponse.FromString, + ) + self.DeletePlaceMatch = channel.unary_unary( + '/labgrid.Coordinator/DeletePlaceMatch', + request_serializer=labgrid__coordinator__pb2.DeletePlaceMatchRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceMatchResponse.FromString, + ) + self.AcquirePlace = channel.unary_unary( + '/labgrid.Coordinator/AcquirePlace', + request_serializer=labgrid__coordinator__pb2.AcquirePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AcquirePlaceResponse.FromString, + ) + self.ReleasePlace = channel.unary_unary( + '/labgrid.Coordinator/ReleasePlace', + request_serializer=labgrid__coordinator__pb2.ReleasePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ReleasePlaceResponse.FromString, + ) + self.AllowPlace = channel.unary_unary( + '/labgrid.Coordinator/AllowPlace', + request_serializer=labgrid__coordinator__pb2.AllowPlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AllowPlaceResponse.FromString, + ) + self.CreateReservation = channel.unary_unary( + '/labgrid.Coordinator/CreateReservation', + request_serializer=labgrid__coordinator__pb2.CreateReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.CreateReservationResponse.FromString, + ) + self.CancelReservation = channel.unary_unary( + 
'/labgrid.Coordinator/CancelReservation', + request_serializer=labgrid__coordinator__pb2.CancelReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.CancelReservationResponse.FromString, + ) + self.PollReservation = channel.unary_unary( + '/labgrid.Coordinator/PollReservation', + request_serializer=labgrid__coordinator__pb2.PollReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.PollReservationResponse.FromString, + ) + self.GetReservations = channel.unary_unary( + '/labgrid.Coordinator/GetReservations', + request_serializer=labgrid__coordinator__pb2.GetReservationsRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.GetReservationsResponse.FromString, + ) + + +class CoordinatorServicer(object): + """Missing associated documentation comment in .proto file.""" + + def ClientStream(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ExporterStream(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetPlaces(self, request, context): + """Missing 
associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlaceAlias(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlaceAlias(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetPlaceTags(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetPlaceComment(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlaceMatch(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlaceMatch(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AcquirePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method 
not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReleasePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AllowPlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CancelReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PollReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetReservations(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CoordinatorServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ClientStream': grpc.stream_stream_rpc_method_handler( + servicer.ClientStream, + request_deserializer=labgrid__coordinator__pb2.ClientInMessage.FromString, + 
response_serializer=labgrid__coordinator__pb2.ClientOutMessage.SerializeToString, + ), + 'ExporterStream': grpc.stream_stream_rpc_method_handler( + servicer.ExporterStream, + request_deserializer=labgrid__coordinator__pb2.ExporterInMessage.FromString, + response_serializer=labgrid__coordinator__pb2.ExporterOutMessage.SerializeToString, + ), + 'AddPlace': grpc.unary_unary_rpc_method_handler( + servicer.AddPlace, + request_deserializer=labgrid__coordinator__pb2.AddPlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceResponse.SerializeToString, + ), + 'DeletePlace': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlace, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceResponse.SerializeToString, + ), + 'GetPlaces': grpc.unary_unary_rpc_method_handler( + servicer.GetPlaces, + request_deserializer=labgrid__coordinator__pb2.GetPlacesRequest.FromString, + response_serializer=labgrid__coordinator__pb2.GetPlacesResponse.SerializeToString, + ), + 'AddPlaceAlias': grpc.unary_unary_rpc_method_handler( + servicer.AddPlaceAlias, + request_deserializer=labgrid__coordinator__pb2.AddPlaceAliasRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceAliasResponse.SerializeToString, + ), + 'DeletePlaceAlias': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlaceAlias, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceAliasRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceAliasResponse.SerializeToString, + ), + 'SetPlaceTags': grpc.unary_unary_rpc_method_handler( + servicer.SetPlaceTags, + request_deserializer=labgrid__coordinator__pb2.SetPlaceTagsRequest.FromString, + response_serializer=labgrid__coordinator__pb2.SetPlaceTagsResponse.SerializeToString, + ), + 'SetPlaceComment': grpc.unary_unary_rpc_method_handler( + servicer.SetPlaceComment, + 
request_deserializer=labgrid__coordinator__pb2.SetPlaceCommentRequest.FromString, + response_serializer=labgrid__coordinator__pb2.SetPlaceCommentResponse.SerializeToString, + ), + 'AddPlaceMatch': grpc.unary_unary_rpc_method_handler( + servicer.AddPlaceMatch, + request_deserializer=labgrid__coordinator__pb2.AddPlaceMatchRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceMatchResponse.SerializeToString, + ), + 'DeletePlaceMatch': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlaceMatch, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceMatchRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceMatchResponse.SerializeToString, + ), + 'AcquirePlace': grpc.unary_unary_rpc_method_handler( + servicer.AcquirePlace, + request_deserializer=labgrid__coordinator__pb2.AcquirePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AcquirePlaceResponse.SerializeToString, + ), + 'ReleasePlace': grpc.unary_unary_rpc_method_handler( + servicer.ReleasePlace, + request_deserializer=labgrid__coordinator__pb2.ReleasePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.ReleasePlaceResponse.SerializeToString, + ), + 'AllowPlace': grpc.unary_unary_rpc_method_handler( + servicer.AllowPlace, + request_deserializer=labgrid__coordinator__pb2.AllowPlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AllowPlaceResponse.SerializeToString, + ), + 'CreateReservation': grpc.unary_unary_rpc_method_handler( + servicer.CreateReservation, + request_deserializer=labgrid__coordinator__pb2.CreateReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.CreateReservationResponse.SerializeToString, + ), + 'CancelReservation': grpc.unary_unary_rpc_method_handler( + servicer.CancelReservation, + request_deserializer=labgrid__coordinator__pb2.CancelReservationRequest.FromString, + 
response_serializer=labgrid__coordinator__pb2.CancelReservationResponse.SerializeToString, + ), + 'PollReservation': grpc.unary_unary_rpc_method_handler( + servicer.PollReservation, + request_deserializer=labgrid__coordinator__pb2.PollReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.PollReservationResponse.SerializeToString, + ), + 'GetReservations': grpc.unary_unary_rpc_method_handler( + servicer.GetReservations, + request_deserializer=labgrid__coordinator__pb2.GetReservationsRequest.FromString, + response_serializer=labgrid__coordinator__pb2.GetReservationsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'labgrid.Coordinator', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Coordinator(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def ClientStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/labgrid.Coordinator/ClientStream', + labgrid__coordinator__pb2.ClientInMessage.SerializeToString, + labgrid__coordinator__pb2.ClientOutMessage.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ExporterStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/labgrid.Coordinator/ExporterStream', + labgrid__coordinator__pb2.ExporterInMessage.SerializeToString, + labgrid__coordinator__pb2.ExporterOutMessage.FromString, + options, channel_credentials, + insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddPlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AddPlace', + labgrid__coordinator__pb2.AddPlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeletePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/DeletePlace', + labgrid__coordinator__pb2.DeletePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetPlaces(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/GetPlaces', + labgrid__coordinator__pb2.GetPlacesRequest.SerializeToString, + labgrid__coordinator__pb2.GetPlacesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddPlaceAlias(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AddPlaceAlias', + 
labgrid__coordinator__pb2.AddPlaceAliasRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceAliasResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeletePlaceAlias(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/DeletePlaceAlias', + labgrid__coordinator__pb2.DeletePlaceAliasRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceAliasResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SetPlaceTags(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/SetPlaceTags', + labgrid__coordinator__pb2.SetPlaceTagsRequest.SerializeToString, + labgrid__coordinator__pb2.SetPlaceTagsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SetPlaceComment(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/SetPlaceComment', + labgrid__coordinator__pb2.SetPlaceCommentRequest.SerializeToString, + labgrid__coordinator__pb2.SetPlaceCommentResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddPlaceMatch(request, + target, + options=(), + 
channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AddPlaceMatch', + labgrid__coordinator__pb2.AddPlaceMatchRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceMatchResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeletePlaceMatch(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/DeletePlaceMatch', + labgrid__coordinator__pb2.DeletePlaceMatchRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceMatchResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AcquirePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AcquirePlace', + labgrid__coordinator__pb2.AcquirePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AcquirePlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReleasePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/ReleasePlace', + labgrid__coordinator__pb2.ReleasePlaceRequest.SerializeToString, + 
labgrid__coordinator__pb2.ReleasePlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AllowPlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AllowPlace', + labgrid__coordinator__pb2.AllowPlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AllowPlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CreateReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/CreateReservation', + labgrid__coordinator__pb2.CreateReservationRequest.SerializeToString, + labgrid__coordinator__pb2.CreateReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CancelReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/CancelReservation', + labgrid__coordinator__pb2.CancelReservationRequest.SerializeToString, + labgrid__coordinator__pb2.CancelReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PollReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + 
compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/PollReservation', + labgrid__coordinator__pb2.PollReservationRequest.SerializeToString, + labgrid__coordinator__pb2.PollReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetReservations(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/GetReservations', + labgrid__coordinator__pb2.GetReservationsRequest.SerializeToString, + labgrid__coordinator__pb2.GetReservationsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/labgrid/remote/generated/requirements.in b/labgrid/remote/generated/requirements.in new file mode 100644 index 000000000..c43218a5c --- /dev/null +++ b/labgrid/remote/generated/requirements.in @@ -0,0 +1,3 @@ +# use ./update-requirements.sh to update requirements.txt +grpcio-tools==1.62.2 + diff --git a/labgrid/remote/generated/requirements.txt b/labgrid/remote/generated/requirements.txt new file mode 100644 index 000000000..580b2389d --- /dev/null +++ b/labgrid/remote/generated/requirements.txt @@ -0,0 +1,15 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile requirements.in +# +grpcio==1.64.1 + # via grpcio-tools +grpcio-tools==1.62.2 + # via -r requirements.in +protobuf==4.25.3 + # via grpcio-tools + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/labgrid/remote/generated/update-requirements.sh b/labgrid/remote/generated/update-requirements.sh new file mode 100755 index 
000000000..f828ed573 --- /dev/null +++ b/labgrid/remote/generated/update-requirements.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +set -ex + +pipx run --spec pip-tools pip-compile requirements.in -U | tee requirements.txt diff --git a/labgrid/remote/proto/labgrid-coordinator.proto b/labgrid/remote/proto/labgrid-coordinator.proto new file mode 100644 index 000000000..e0585f7e1 --- /dev/null +++ b/labgrid/remote/proto/labgrid-coordinator.proto @@ -0,0 +1,297 @@ +syntax = "proto3"; + +package labgrid; + +service Coordinator { + rpc ClientStream(stream ClientInMessage) returns (stream ClientOutMessage) {} + + rpc ExporterStream(stream ExporterInMessage) returns (stream ExporterOutMessage) {} + + rpc AddPlace(AddPlaceRequest) returns (AddPlaceResponse) {} + + rpc DeletePlace(DeletePlaceRequest) returns (DeletePlaceResponse) {} + + rpc GetPlaces(GetPlacesRequest) returns (GetPlacesResponse) {} + + rpc AddPlaceAlias(AddPlaceAliasRequest) returns (AddPlaceAliasResponse) {} + + rpc DeletePlaceAlias(DeletePlaceAliasRequest) returns (DeletePlaceAliasResponse) {} + + rpc SetPlaceTags(SetPlaceTagsRequest) returns (SetPlaceTagsResponse) {} + + rpc SetPlaceComment(SetPlaceCommentRequest) returns (SetPlaceCommentResponse) {} + + rpc AddPlaceMatch(AddPlaceMatchRequest) returns (AddPlaceMatchResponse) {} + + rpc DeletePlaceMatch(DeletePlaceMatchRequest) returns (DeletePlaceMatchResponse) {} + + rpc AcquirePlace(AcquirePlaceRequest) returns (AcquirePlaceResponse) {} + + rpc ReleasePlace(ReleasePlaceRequest) returns (ReleasePlaceResponse) {} + + rpc AllowPlace(AllowPlaceRequest) returns (AllowPlaceResponse) {} + + rpc CreateReservation(CreateReservationRequest) returns (CreateReservationResponse) {} + + rpc CancelReservation(CancelReservationRequest) returns (CancelReservationResponse) {} + + rpc PollReservation(PollReservationRequest) returns (PollReservationResponse) {} + + rpc GetReservations(GetReservationsRequest) returns (GetReservationsResponse) {} +} + +message ClientInMessage { + oneof 
kind { + Sync sync = 1; + StartupDone startup = 2; + Subscribe subscribe = 3; + }; +}; + +message Sync { + uint64 id = 1; +}; + +message StartupDone { + string version = 1; + string name = 2; +}; + +message Subscribe { + optional bool is_unsubscribe = 1; + oneof kind { + bool all_places = 2; + bool all_resources = 3; + } +}; + +message ClientOutMessage { + optional Sync sync = 1; + repeated UpdateResponse updates = 2; +}; + +message UpdateResponse { + oneof kind { + Resource resource = 1; + Resource.Path del_resource = 2; + Place place = 3; + string del_place = 4; + }; +}; + +message ExporterInMessage { + oneof kind { + Resource resource = 1; + StartupDone startup = 2; + ExporterResponse response = 3; + }; +}; + +message Resource { + message Path { + optional string exporter_name = 1; + string group_name = 2; + string resource_name = 3; + } + Path path = 1; + string cls = 2; + map params = 3; + map extra = 4; + string acquired = 5; + bool avail = 6; +}; + +message MapValue { + oneof kind { + bool bool_value = 1; + int64 int_value = 2; + uint64 uint_value = 3; + double float_value = 4; + string string_value = 5; + // FIXME do we need arrays? 
+ } +}; + +message ExporterResponse { + bool success = 1; + optional string reason = 2; +}; + +message Hello { + string version = 1; +} + +message ExporterOutMessage { + oneof kind { + Hello hello = 1; + ExporterSetAcquiredRequest set_acquired_request = 2; + }; +}; + +message ExporterSetAcquiredRequest { + string group_name = 1; + string resource_name = 2; + optional string place_name = 3; +}; + +message AddPlaceRequest { + string name = 1; +}; + +message AddPlaceResponse { +}; + +message DeletePlaceRequest { + string name = 1; +}; + +message DeletePlaceResponse { +}; + +message GetPlacesRequest { +}; + +message GetPlacesResponse { + repeated Place places = 1; +} + +message Place { + string name = 1; + repeated string aliases = 2; + string comment = 3; + map tags = 4; + repeated ResourceMatch matches = 5; + optional string acquired = 6; + repeated string acquired_resources = 7; + repeated string allowed = 8; + double created = 9; + double changed = 10; + optional string reservation = 11; +}; + +message ResourceMatch { + string exporter = 1; + string group = 2; + string cls = 3; + optional string name = 4; + optional string rename = 5; +}; + +message AddPlaceAliasRequest { + string placename = 1; + string alias = 2; +}; + +message AddPlaceAliasResponse { +}; + +message DeletePlaceAliasRequest { + string placename = 1; + string alias = 2; +}; + +message DeletePlaceAliasResponse { +}; + +message SetPlaceTagsRequest { + string placename = 1; + map tags = 2; +}; + +message SetPlaceTagsResponse { +}; + +message SetPlaceCommentRequest { + string placename = 1; + string comment = 2; +}; + +message SetPlaceCommentResponse { +}; + +message AddPlaceMatchRequest { + string placename = 1; + string pattern = 2; + optional string rename = 3; +}; + +message AddPlaceMatchResponse { +}; + +message DeletePlaceMatchRequest { + string placename = 1; + string pattern = 2; + optional string rename = 3; +}; + +message DeletePlaceMatchResponse { +}; + +message AcquirePlaceRequest { + 
string placename = 1; +}; + +message AcquirePlaceResponse { +}; + +message ReleasePlaceRequest { + string placename = 1; + optional string fromuser = 2; +}; + +message ReleasePlaceResponse { +}; + +message AllowPlaceRequest { + string placename = 1; + string user = 2; +}; + +message AllowPlaceResponse { +}; + + +message CreateReservationRequest { + map filters = 1; + double prio = 2; +}; + +message CreateReservationResponse { + Reservation reservation = 1; +}; + +message Reservation { + message Filter { + map filter = 1; + } + string owner = 1; + string token = 2; + int32 state = 3; + double prio = 4; + map filters = 5; + map allocations = 6; + double created = 7; + double timeout = 8; +}; + +message CancelReservationRequest { + string token = 1; +}; + +message CancelReservationResponse { +}; + +message PollReservationRequest { + string token = 1; +}; + +message PollReservationResponse { + Reservation reservation = 1; +}; + +message GetReservationsResponse { + repeated Reservation reservations = 1; +}; + +message GetReservationsRequest { +}; diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py index ad116382d..1b8256ef0 100644 --- a/labgrid/resource/remote.py +++ b/labgrid/resource/remote.py @@ -11,7 +11,6 @@ class RemotePlaceManager(ResourceManager): def __attrs_post_init__(self): super().__attrs_post_init__() self.url = None - self.realm = None self.loop = None self.session = None self.ready = None @@ -23,7 +22,7 @@ def _start(self): from ..remote.client import start_session try: - self.session = start_session(self.url, self.realm, {'env': self.env}) + self.session = start_session(self.url, {'env': self.env}) except ConnectionRefusedError as e: raise ConnectionRefusedError(f"Could not connect to coordinator {self.url}") \ from e @@ -39,12 +38,10 @@ def on_resource_added(self, resource): # be the same). 
if not self.session: self.env = remote_place.target.env - self.url = os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws") - self.realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + self.url = os.environ.get("LG_COORDINATOR", "127.0.0.1:20408") if self.env: config = self.env.config - self.url = config.get_option('crossbar_url', self.url) - self.realm = config.get_option('crossbar_realm', self.realm) + self.url = config.get_option("coordinator_address", self.url) self._start() place = self.session.get_place(remote_place.name) # pylint: disable=no-member resource_entries = self.session.get_target_resources(place) # pylint: disable=no-member diff --git a/labgrid/util/proxy.py b/labgrid/util/proxy.py index a5740e8fd..d489d89a5 100644 --- a/labgrid/util/proxy.py +++ b/labgrid/util/proxy.py @@ -95,6 +95,13 @@ def get_url(cls, url, *, default_port=None): return urlunsplit(s) + @classmethod + def get_grpc_address(cls, address, *, default_port=None): + url = f"//{address}" + url = proxymanager.get_url(url, default_port=default_port) + address = url.lstrip("/") + return address + @classmethod def get_command(cls, res, host, port, ifname=None): """get argument list to start a proxy process connected to the target""" diff --git a/pyproject.toml b/pyproject.toml index d84b7b571..590fcb7c7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,9 @@ classifiers = [ dependencies = [ "ansicolors>=1.1.8", "attrs>=21.4.0", - "autobahn>=21.3.1", + "grpcio>=1.64.1, <2.0.0", + "grpcio-reflection>=1.64.1, <2.0.0", + "protobuf>=5.27.0", "jinja2>=3.0.2", "pexpect>=4.8.0", "pyserial-labgrid>=3.4.0.1", @@ -118,11 +120,15 @@ dev = [ # additional dev dependencies "psutil>=5.8.0", + "pytest-benchmark>=4.0.0", "pytest-cov>=3.0.0", "pytest-dependency>=0.5.1", "pytest-isort>=2.0.0", "pytest-mock>=3.6.1", "pylint>=3.0.0", + + # GRPC Channelz support + "grpcio-channelz>=1.64.1, <2.0.0", ] [project.scripts] @@ -130,6 +136,7 @@ labgrid-autoinstall = "labgrid.autoinstall.main:main" 
labgrid-client = "labgrid.remote.client:main" labgrid-exporter = "labgrid.remote.exporter:main" labgrid-suggest = "labgrid.resource.suggest:main" +labgrid-coordinator = "labgrid.remote.coordinator:main" # the following makes a plugin available to pytest [project.entry-points.pytest11] @@ -145,6 +152,7 @@ packages = [ "labgrid.protocol", "labgrid.pytestplugin", "labgrid.remote", + "labgrid.remote.generated", "labgrid.resource", "labgrid.strategy", "labgrid.util", @@ -211,6 +219,7 @@ enable = [ generated-members = [ "labgrid.driver.*", "labgrid.strategy.*", + "labgrid_coordinator_pb2", ] signature-mutators = ["labgrid.step.step"] diff --git a/tests/conftest.py b/tests/conftest.py index 50bcad1a0..164914c31 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,12 +1,10 @@ import logging -from pathlib import Path from signal import SIGTERM import sys import threading import pytest import pexpect -import yaml from labgrid import Target from labgrid.driver import SerialDriver @@ -100,56 +98,24 @@ def serial_driver_no_name(target, serial_port, mocker): return s @pytest.fixture(scope='function') -def crossbar_config(tmpdir, pytestconfig): - crossbar_config = '.crossbar/config-anonymous.yaml' - - pytestconfig.rootdir.join(crossbar_config).copy(tmpdir.mkdir('.crossbar')) - crossbar_config = tmpdir.join(crossbar_config) - - # crossbar runs labgrid's coordinator component as a guest, record its coverage - if pytestconfig.pluginmanager.get_plugin('pytest_cov'): - with open(crossbar_config, 'r+') as stream: - conf = yaml.safe_load(stream) - - for worker in conf['workers']: - if worker['type'] == 'guest': - worker['executable'] = 'coverage' - worker['arguments'].insert(0, 'run') - worker['arguments'].insert(1, '--parallel-mode') - # pytest-cov combines coverage files in root dir automatically, so copy it there - coverage_data = pytestconfig.rootdir.join('.coverage') - worker['arguments'].insert(2, f'--data-file={coverage_data}') - - stream.seek(0) - yaml.safe_dump(conf, 
stream) - - return crossbar_config - -@pytest.fixture(scope='function') -def crossbar(tmpdir, pytestconfig, crossbar_config): - crossbar_venv = Path(pytestconfig.getoption("--crossbar-venv")) - if not crossbar_venv.is_absolute(): - crossbar_venv = pytestconfig.rootdir / crossbar_venv - crossbar_bin = crossbar_venv / "bin/crossbar" +def coordinator(tmpdir): spawn = pexpect.spawn( - f'{crossbar_bin} start --color false --logformat none --config {crossbar_config}', - logfile=Prefixer(sys.stdout.buffer, 'crossbar'), + 'labgrid-coordinator', + logfile=Prefixer(sys.stdout.buffer, 'coordinator'), cwd=str(tmpdir)) try: - spawn.expect('Realm .* started') - spawn.expect('Guest .* started') spawn.expect('Coordinator ready') except: - print(f"crossbar startup failed with {spawn.before}") + print(f"coordinator startup failed with {spawn.before}") raise - reader = threading.Thread(target=keep_reading, name='crossbar-reader', args=(spawn,), daemon=True) + reader = threading.Thread(target=keep_reading, name='coordinator-reader', args=(spawn,), daemon=True) reader.start() yield spawn # let coverage write its data: # https://coverage.readthedocs.io/en/latest/subprocess.html#process-termination - print("stopping crossbar") + print("stopping coordinator") spawn.kill(SIGTERM) spawn.expect(pexpect.EOF) spawn.wait() @@ -157,7 +123,15 @@ def crossbar(tmpdir, pytestconfig, crossbar_config): reader.join() @pytest.fixture(scope='function') -def exporter(tmpdir, crossbar): +def exporter(tmpdir, coordinator, start_exporter): + yield start_exporter() + + +@pytest.fixture(scope='function') +def start_exporter(tmpdir, coordinator): + spawns = [] + readers = [] + p = tmpdir.join("exports.yaml") p.write( """ @@ -177,22 +151,33 @@ def exporter(tmpdir, crossbar): username: "root" """ ) - spawn = pexpect.spawn( - f'{sys.executable} -m labgrid.remote.exporter --name testhost exports.yaml', - logfile=Prefixer(sys.stdout.buffer, 'exporter'), - cwd=str(tmpdir)) - try: - spawn.expect('exporter/testhost') - 
except: - print(f"exporter startup failed with {spawn.before}") - raise - reader = threading.Thread(target=keep_reading, name='exporter-reader', args=(spawn,), daemon=True) - reader.start() - yield spawn - print("stopping exporter") - spawn.close(force=True) - assert not spawn.isalive() - reader.join() + + def _start_exporter(): + spawn = pexpect.spawn( + f'{sys.executable} -m labgrid.remote.exporter --name testhost exports.yaml', + logfile=Prefixer(sys.stdout.buffer, 'exporter'), + cwd=str(tmpdir)) + try: + spawn.expect('exporter name: testhost') + spawn.expect('connected to exporter') + except: + print(f"exporter startup failed with {spawn.before}") + raise + reader = threading.Thread(target=keep_reading, name=f'exporter-reader-{spawn.pid}', args=(spawn,), daemon=True) + reader.start() + + spawns.append(spawn) + readers.append(reader) + + return spawn + + yield _start_exporter + + for spawn, reader in zip(spawns, readers): + print(f"stopping exporter pid={spawn.pid}") + spawn.close(force=True) + assert not spawn.isalive() + reader.join() def pytest_addoption(parser): parser.addoption("--sigrok-usb", action="store_true", @@ -201,8 +186,6 @@ def pytest_addoption(parser): help="Run SSHManager tests against localhost") parser.addoption("--ssh-username", default=None, help="SSH username to use for SSHDriver testing") - parser.addoption("--crossbar-venv", default=None, - help="Path to separate virtualenv with crossbar installed") def pytest_configure(config): # register an additional marker @@ -213,7 +196,7 @@ def pytest_configure(config): config.addinivalue_line("markers", "sshusername: test SSHDriver against Localhost") config.addinivalue_line("markers", - "crossbar: test against local crossbar") + "coordinator: test against local coordinator") def pytest_runtest_setup(item): envmarker = item.get_closest_marker("sigrokusb") @@ -228,7 +211,3 @@ def pytest_runtest_setup(item): if envmarker is not None: if item.config.getoption("--ssh-username") is None: 
pytest.skip("SSHDriver tests against localhost not enabled (enable with --ssh-username )") - envmarker = item.get_closest_marker("crossbar") - if envmarker is not None: - if item.config.getoption("--crossbar-venv") is None: - pytest.skip("No path to crossbar virtualenv given (set with --crossbar-venv )") diff --git a/tests/test_crossbar.py b/tests/test_client.py similarity index 80% rename from tests/test_crossbar.py rename to tests/test_client.py index a1db0eeeb..d82c0a8ce 100644 --- a/tests/test_crossbar.py +++ b/tests/test_client.py @@ -1,6 +1,5 @@ import os import re -import sys import time import pytest @@ -8,8 +7,6 @@ psutil = pytest.importorskip("psutil") -pytestmark = pytest.mark.crossbar - def suspend_tree(pid): main = psutil.Process(pid) main.suspend() @@ -22,12 +19,11 @@ def resume_tree(pid): for child in main.children(recursive=True): child.resume() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_startup(crossbar): +def test_startup(coordinator): pass @pytest.fixture(scope='function') -def place(crossbar): +def place(coordinator): with pexpect.spawn('python -m labgrid.remote.client -p test create') as spawn: spawn.expect(pexpect.EOF) spawn.close() @@ -65,26 +61,24 @@ def place_acquire(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() def test_connect_error(): - with pexpect.spawn('python -m labgrid.remote.client -x ws://127.0.0.1:20409/ws places') as spawn: + with pexpect.spawn('python -m labgrid.remote.client -x 127.0.0.1:20409 places') as spawn: spawn.expect("Could not connect to coordinator") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_connect_timeout(crossbar): - suspend_tree(crossbar.pid) +def test_connect_timeout(coordinator): + suspend_tree(coordinator.pid) try: with 
pexpect.spawn('python -m labgrid.remote.client places') as spawn: - spawn.expect("connection closed during setup") + spawn.expect("connection attempt timed out before receiving SETTINGS frame") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() finally: - resume_tree(crossbar.pid) + resume_tree(coordinator.pid) pass -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_show(place): with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: spawn.expect("Place 'test':") @@ -92,7 +86,6 @@ def test_place_show(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_alias(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-alias foo') as spawn: spawn.expect(pexpect.EOF) @@ -104,7 +97,6 @@ def test_place_alias(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_comment(place): with pexpect.spawn('python -m labgrid.remote.client -p test set-comment my comment') as spawn: spawn.expect(pexpect.EOF) @@ -118,7 +110,6 @@ def test_place_comment(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_match(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "e1/g1/r1" "e2/g2/*"') as spawn: spawn.expect(pexpect.EOF) @@ -137,7 +128,6 @@ def test_place_match(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def 
test_place_match_duplicates(place): # first given match should succeed, second should be skipped matches = ( @@ -158,7 +148,6 @@ def test_place_match_duplicates(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire(place): with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: spawn.expect(pexpect.EOF) @@ -176,7 +165,6 @@ def test_place_acquire(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire_enforce(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match does/not/exist') as spawn: spawn.expect(pexpect.EOF) @@ -200,7 +188,6 @@ def test_place_acquire_enforce(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire_broken(place, exporter): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "*/Broken/*"') as spawn: spawn.expect(pexpect.EOF) @@ -208,7 +195,7 @@ def test_place_acquire_broken(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: - spawn.expect('failed to acquire place test') + spawn.expect('Failed to acquire resources for place test') spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() @@ -220,7 +207,6 @@ def test_place_acquire_broken(place, exporter): print(spawn.before.decode()) assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_release_from(monkeypatch, place, exporter): 
user = "test-user" host = "test-host" @@ -267,23 +253,20 @@ def test_place_release_from(monkeypatch, place, exporter): before = spawn.before.decode("utf-8").strip() assert user not in before and not host in before, before -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_place_add_no_name(crossbar): +def test_place_add_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client create') as spawn: spawn.expect("missing place name") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_place_del_no_name(crossbar): +def test_place_del_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client delete') as spawn: - spawn.expect("deletes require an exact place name") + spawn.expect("name was not a string") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target(place_acquire, tmpdir): from labgrid.environment import Environment p = tmpdir.join("config.yaml") @@ -304,7 +287,6 @@ def test_remoteplace_target(place_acquire, tmpdir): remote_place = t.get_resource("RemotePlace") assert remote_place.tags == {"board": "bar"} -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target_without_env(request, place_acquire): from labgrid import Target from labgrid.resource import RemotePlace @@ -313,7 +295,6 @@ def test_remoteplace_target_without_env(request, place_acquire): remote_place = RemotePlace(t, name="test") assert remote_place.tags == {"board": "bar"} -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") 
def test_resource_conflict(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test2 create') as spawn: spawn.expect(pexpect.EOF) @@ -335,7 +316,6 @@ def test_resource_conflict(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client reserve --shell board=bar name=test') as spawn: spawn.expect(pexpect.EOF) @@ -413,7 +393,93 @@ def test_reservation(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") +def test_resource_acquired_state_on_exporter_restart(monkeypatch, place,start_exporter): + user = "test-user" + host = "test-host" + monkeypatch.setenv("LG_USERNAME", user) + monkeypatch.setenv("LG_HOSTNAME", host) + + exporter = start_exporter() + + # add resource match + with pexpect.spawn('python -m labgrid.remote.client -p test add-match testhost/Testport/NetworkSerialPort') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + # make sure matching resource is found + with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"acquired: None" in spawn.before + assert b"Matching resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort/NetworkSerialPort)" in spawn.before + + with pexpect.spawn('python -m labgrid.remote.client -p test -v resources') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"Resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': 
None," in spawn.before + + # lock place (and its resources) + with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + with pexpect.spawn('python -m labgrid.remote.client -p test -v resources') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"Resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': 'test'," in spawn.before + + # stop exporter + exporter.close() + assert not exporter.isalive() + + # start exporter again + exporter = start_exporter() + + # make sure matching resource is still found + with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert f"acquired: {host}/{user}" in spawn.before.decode("utf-8") + assert b"Acquired resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort/NetworkSerialPort)" in spawn.before + + # release place + with pexpect.spawn('python -m labgrid.remote.client -p test release') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + with pexpect.spawn('python -m labgrid.remote.client -p test -v resources') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"Resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': None," in spawn.before + + # make sure matching resource is still found + with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"acquired: None" in spawn.before + assert b"Matching resource 'NetworkSerialPort' 
(testhost/Testport/NetworkSerialPort/NetworkSerialPort)" in spawn.before + + # place should now be acquirable again + with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + with pexpect.spawn('python -m labgrid.remote.client -p test release') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + def test_exporter_timeout(place, exporter): with pexpect.spawn('python -m labgrid.remote.client resources') as spawn: spawn.expect(pexpect.EOF) @@ -451,7 +517,6 @@ def test_exporter_timeout(place, exporter): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation_custom_config(place, exporter, tmpdir): p = tmpdir.join("config.yaml") p.write( @@ -489,7 +554,6 @@ def test_reservation_custom_config(place, exporter, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_same_name_resources(place, exporter, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test add-named-match "testhost/Many/NetworkService" "samename"') as spawn: spawn.expect(pexpect.EOF) diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py new file mode 100644 index 000000000..27c04969b --- /dev/null +++ b/tests/test_coordinator.py @@ -0,0 +1,167 @@ +import pytest + +import grpc +import labgrid.remote.generated.labgrid_coordinator_pb2_grpc as labgrid_coordinator_pb2_grpc +import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 + +psutil = pytest.importorskip("psutil") + +@pytest.fixture(scope='function') +def channel_stub(): + import queue + queue = queue.Queue() + + channel = 
grpc.insecure_channel("127.0.0.1:20408") + stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(channel) + def generate_startup(queue): + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.startup.version = "2.0.0" + msg.startup.name = "testclient" + messages = [ + msg + ] + for msg in messages: + yield msg + while True: + msg = queue.get() + yield msg + queue.task_done() + + stream = stub.ClientStream(generate_startup(queue)) + yield stub + channel.close() + +@pytest.fixture(scope='function') +def coordinator_place(channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + return channel_stub + +def test_startup(coordinator): + pass + +def test_coordinator_add_place(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + +def test_coordinator_del_place(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + place = labgrid_coordinator_pb2.DeletePlaceRequest(name=name) + res = channel_stub.DeletePlace(place) + assert res, f"There was an error: {res}" + +def test_coordinator_get_places(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + name = "test2" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + + request = labgrid_coordinator_pb2.GetPlacesRequest() + res = channel_stub.GetPlaces(request) + + from labgrid.remote.common import Place + places = set() + names = set() + for pb2 in res.places: + place = Place.from_pb2(pb2) + places.add(place) + names.add(place.name) + + 
assert len(places) == 2, f"Returned places not two: {places}" + assert set(names) == {"test", "test2"}, f"There was an error: {res}" + +def test_coordinator_exporter_session(coordinator, channel_stub): + import queue + queue = queue.Queue() + + def generate_startup(queue): + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.startup.version = "2.0.0" + msg.startup.name = "testporter" + messages = [ + msg + ] + for msg in messages: + yield msg + while True: + msg = queue.get() + yield msg + queue.task_done() + + coordinator = channel_stub.ExporterStream(generate_startup(queue), wait_for_ready=True) + +def test_coordinator_place_acquire(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + +def test_coordinator_place_acquire_release(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + res = stub.ReleasePlace(labgrid_coordinator_pb2.ReleasePlaceRequest(placename="test")) + assert res + +def test_coordinator_place_add_alias(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceAlias(labgrid_coordinator_pb2.AddPlaceAliasRequest(placename="test", alias="testalias")) + assert res + +def test_coordinator_place_add_remove_alias(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceAlias(labgrid_coordinator_pb2.AddPlaceAliasRequest(placename="test", alias="testalias")) + assert res + res = stub.DeletePlaceAlias(labgrid_coordinator_pb2.DeletePlaceAliasRequest(placename="test", alias="testalias")) + assert res + +def test_coordinator_place_set_tags(coordinator, coordinator_place): + stub = coordinator_place + res = stub.SetPlaceTags(labgrid_coordinator_pb2.SetPlaceTagsRequest(placename="test", tags={"one": "two"})) + assert res + +def test_coordinator_place_set_comment(coordinator, coordinator_place): 
+ stub = coordinator_place + res = stub.SetPlaceComment(labgrid_coordinator_pb2.SetPlaceCommentRequest(placename="test", comment="testcomment")) + assert res + +def test_coordinator_place_add_match(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceMatch(labgrid_coordinator_pb2.AddPlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + +def test_coordinator_place_add_delete_match(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceMatch(labgrid_coordinator_pb2.AddPlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + res = stub.DeletePlaceMatch(labgrid_coordinator_pb2.DeletePlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + +def test_coordinator_place_allow(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + res = stub.AllowPlace(labgrid_coordinator_pb2.AllowPlaceRequest(placename="test", user="othertest")) + assert res + +def test_coordinator_create_reservation(coordinator, coordinator_place): + tags = {"board": "test"} + stub = coordinator_place + res = stub.SetPlaceTags(labgrid_coordinator_pb2.SetPlaceTagsRequest(placename="test", tags=tags)) + assert res + res = stub.CreateReservation(labgrid_coordinator_pb2.CreateReservationRequest(filters={ + "main": labgrid_coordinator_pb2.Reservation.Filter(filter={"board": "test"}), + }, prio=1.0)) + assert res + res: labgrid_coordinator_pb2.CreateReservationResponse + assert len(res.reservation.token) > 0 diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py index a1cd7600c..4ed9fba39 100644 --- a/tests/test_fixtures.py +++ b/tests/test_fixtures.py @@ -72,12 +72,12 @@ def test_env_with_junit(short_env, short_test, tmpdir): def test_help(short_test): with pexpect.spawn(f'pytest --help {short_test}') as spawn: spawn.expect(pexpect.EOF) - assert 
b'--lg-coordinator=CROSSBAR_URL' in spawn.before + assert b'--lg-coordinator=COORDINATOR_ADDRESS' in spawn.before spawn.close() assert spawn.exitstatus == 0 def test_help_coordinator(short_test): - with pexpect.spawn(f'pytest --lg-coordinator=ws://127.0.0.1:20408/ws --help {short_test}') as spawn: + with pexpect.spawn(f'pytest --lg-coordinator=127.0.0.1:20408 --help {short_test}') as spawn: spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 0 diff --git a/tests/test_pb2.py b/tests/test_pb2.py new file mode 100644 index 000000000..d1340ab45 --- /dev/null +++ b/tests/test_pb2.py @@ -0,0 +1,172 @@ +from labgrid.remote.common import Place, ResourceMatch, Reservation, set_map_from_dict, build_dict_from_map +import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 + +def test_place_as_pb2(): + place = Place(name="testing-place") + pb2 = place.as_pb2() + assert pb2.name == "testing-place" + assert pb2.created == place.created + assert pb2.changed == place.changed + +def test_place_from_pb2(): + place_start = Place(name="testing-place", comment="such-comment") + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.name == place_start.name + assert place_new.comment == place_start.comment + assert place_new.tags == place_start.tags + assert place_new.matches == place_start.matches + assert place_new.acquired == place_start.acquired + assert place_new.acquired_resources == place_start.acquired_resources + assert place_new.allowed == place_start.allowed + assert place_new.created == place_start.created + assert place_new.changed == place_start.changed + assert place_new.reservation == place_start.reservation + +def test_from_pb2_tags(): + tags = {"some": "test", "more": "values"} + place_start = Place(name="testing-place", tags=tags) + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place", f"PB2 has wrong name: {pb2}" + 
assert pb2.tags is not None, f"PB2 has no tags field: {pb2}" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.tags == place_start.tags + assert place_new.tags == tags + +def test_from_pb2_matches(): + rm = ResourceMatch("such", "test", "match") + place_start = Place(name="testing-place", matches=[rm]) + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place", f"PB2 has wrong name: {pb2}" + assert pb2.tags is not None, f"PB2 has no tags field: {pb2}" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.tags == place_start.tags + assert place_new.matches == place_start.matches + +def test_from_pb2_tags_deepcopy(): + # Used by the RemotePlaceManager + tags = {"some": "test", "more": "values"} + place_start = Place(name="testing-place", tags=tags) + pb2 = place_start.as_pb2() + place_new = Place.from_pb2(pb2) + import copy + tags_copy = copy.deepcopy(place_new.tags) + +def test_place_as_pb2_copy_with_match(): + tags = {"some": "test", "more": "values"} + # Used by the RemotePlaceManager + place_start = Place(name="testing-place", tags=tags, comment="Hello", aliases={"some": "alias"}, matches=[ResourceMatch("testporter","somegroup","someclass")]) + out = labgrid_coordinator_pb2.ClientOutMessage() + + update_response = labgrid_coordinator_pb2.UpdateResponse() + update_response.place.CopyFrom(place_start.as_pb2()) + + out.updates.append(update_response) + +def test_match_as_from_pb2(): + rms = ResourceMatch("*", "somegroup", "someclass") + pb2 = rms.as_pb2() + assert pb2 + rme = ResourceMatch.from_pb2(pb2) + assert rms == rme + +def test_reservation_as_pb2(): + reservation = Reservation( + owner="test", + filters={ + "main": {"some": "filter"}, + }, + allocations={ + "main": ["the-place"], + }, + ) + pb2 = reservation.as_pb2() + assert pb2.owner == "test" + assert pb2.token == reservation.token + assert pb2.state == reservation.state.value + assert pb2.filters["main"].filter 
== {"some": "filter"} + assert pb2.created == reservation.created + assert pb2.timeout == reservation.timeout + +def test_reservation_as_from_pb2(): + resold = Reservation( + owner="test", + filters={ + "main": {"some": "filter"}, + }, + allocations={ + "main": ["the-place"], + }, + ) + pb2 = resold.as_pb2() + assert pb2.owner == resold.owner + assert pb2.token == resold.token + assert pb2.state == resold.state.value + assert pb2.filters["main"].filter == {"some": "filter"} + assert pb2.created == resold.created + assert pb2.timeout == resold.timeout + + resnew = Reservation.from_pb2(pb2) + + assert resnew.owner == resold.owner + assert resnew.token == resold.token + assert resnew.state == resold.state + assert resnew.filters["main"] == resold.filters["main"] + assert resnew.created == resold.created + assert resnew.timeout == resold.timeout + +def test_resource_dict(): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + resource = labgrid_coordinator_pb2.Resource() + resource.ParseFromString(bm) + decoded = build_dict_from_map(resource.params) + + assert params == decoded + +def test_map_serialize(benchmark): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + def run(): + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + benchmark(run) + +def test_map_deser(benchmark): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + def run(): + resource = labgrid_coordinator_pb2.Resource() + resource.ParseFromString(bm) + decoded = build_dict_from_map(resource.params) + + benchmark(run) diff --git 
a/tests/test_remote.py b/tests/test_remote.py index 4f803b043..80f54430e 100644 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -1,8 +1,5 @@ -import pytest import pexpect -pytest.importorskip('autobahn') - def test_client_help(): with pexpect.spawn('python -m labgrid.remote.client --help') as spawn: spawn.expect('usage') From d7ac3e7faff061758e82d4bf5ac1ca7f6ff637a9 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 23:05:57 +0200 Subject: [PATCH 02/38] remote/exporter: make coordinator port default to 20408 The coordinator binds to port 20408 by default. The client tries to connect to that port if no port is specified via --coordinator/-x, LG_COORDINATOR or via coordinator_address option in the environment config. Only the exporter does not yet default to 20408. Change that by appending the default port 20408 to the address given via --coordinator/-c or LG_COORDINATOR if no port was specified. Closes #1429 Signed-off-by: Bastian Krause --- labgrid/remote/exporter.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 78b8ca606..019d39f96 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -10,6 +10,7 @@ import traceback shutil subprocess +from urllib.parse import urlsplit warnings from pathlib import Path from typing import Dict, Type @@ -782,6 +783,10 @@ def __init__(self, config) -> None: self.hostname = config["hostname"] self.isolated = config["isolated"] + # default to port 20408 if not specified + if urlsplit(f"//{config['coordinator']}").port is None: + config["coordinator"] += ":20408" + self.channel = grpc.aio.insecure_channel(config["coordinator"]) self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) self.out_queue = asyncio.Queue() From db9a326830277891453fb23f297511c1c42e2771 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 15:42:53 +0200 Subject: [PATCH 03/38] remote/coordinator: catch
exceptions from poll steps separately If one of these fails, the others should not be skipped. Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index b4d4cf27a..336b6409f 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -195,10 +195,16 @@ def __init__(self) -> None: async def _poll_step(self): # save changes - if self.save_scheduled: - await self.save() + try: + if self.save_scheduled: + await self.save() + except Exception: # pylint: disable=broad-except + traceback.print_exc() # update reservations - self.schedule_reservations() + try: + self.schedule_reservations() + except Exception: # pylint: disable=broad-except + traceback.print_exc() async def poll(self): loop = asyncio.get_event_loop() From 829acab0647205af970298ba7fd65101c25842a9 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 12:47:53 +0200 Subject: [PATCH 04/38] remote/coordinator: simplify _update_acquired_places() _update_acquired_places() is only called when resources are added by the exporter or when they are removed after it has disconnected. So we never want to call back to the exporter for removed resources. 
Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 336b6409f..d825a576c 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -334,7 +334,7 @@ def get_exporter_by_name(self, name): if exporter.name == name: return exporter - async def _update_acquired_places(self, action, resource, callback=True): + async def _update_acquired_places(self, action, resource): """Update acquired places when resources are added or removed.""" if action not in [Action.ADD, Action.DEL]: return # currently nothing needed for Action.UPD @@ -357,7 +357,9 @@ async def _update_acquired_places(self, action, resource, callback=True): self._publish_place(place) else: for place in places: - await self._release_resources(place, [resource], callback=callback) + # resources only disappear when exporters disconnect, so we + # can't call back to the exporter + await self._release_resources(place, [resource], callback=False) self._publish_place(place) def _publish_place(self, place): @@ -452,7 +454,7 @@ async def request_task(): for groupname, group in session.groups.items(): for resourcename in group.copy(): action, resource = session.set_resource(groupname, resourcename, None) - await self._update_acquired_places(action, resource, callback=False) + await self._update_acquired_places(action, resource) logging.debug("exporter aborted %s, cancelled: %s", context.peer(), context.cancelled()) From 2ca13299d429b27c5e2c60e74df92dbc3ed02b87 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 15:17:28 +0200 Subject: [PATCH 05/38] remote/coordinator: split out code to acquire a resource on the exporter In future commits, more code will call the newly introduced _acquire_resource() method. 
Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index d825a576c..80a9a6832 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -599,6 +599,21 @@ async def DeletePlaceMatch(self, request, context): self.save_later() return labgrid_coordinator_pb2.DeletePlaceMatchResponse() + async def _acquire_resource(self, place, resource): + assert self.lock.locked() + + # this triggers an update from the exporter which is published + # to the clients + request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest() + request.group_name = resource.path[1] + request.resource_name = resource.path[3] + request.place_name = place.name + cmd = ExporterCommand(request) + self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd) + await cmd.wait() + if not cmd.response.success: + raise ExporterError("failed to acquire {resource}") + async def _acquire_resources(self, place, resources): assert self.lock.locked() @@ -612,17 +627,7 @@ async def _acquire_resources(self, place, resources): acquired = [] try: for resource in resources: - # this triggers an update from the exporter which is published - # to the clients - request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest() - request.group_name = resource.path[1] - request.resource_name = resource.path[3] - request.place_name = place.name - cmd = ExporterCommand(request) - self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd) - await cmd.wait() - if not cmd.response.success: - raise ExporterError("failed to acquire {resource}") + await self._acquire_resource(place, resource) acquired.append(resource) except Exception: logging.exception("failed to acquire %s", resource) From 33126fa59e6687ff5ef7a3dc8915755f91094266 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 15:40:06 +0200 Subject: [PATCH 06/38] 
remote/client: explicitly handle orphaned resources in the client In cases when an exporter used by an acquired place disconnects, the place still references those orphaned resources. If the exporter reconnects, they are reacquired by the coordinator automatically. Signed-off-by: Jan Luebbe --- labgrid/remote/client.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index a78759fb7..8ce447d7a 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -472,7 +472,11 @@ async def print_place(self): name = resource_name if match.rename: name = match.rename - resource = self.resources[exporter][group_name][resource_name] + try: + resource = self.resources[exporter][group_name][resource_name] + except KeyError: + print(f"Orphaned resource '{name}' ({exporter}/{group_name}/{cls}/{resource_name})") + continue print(f"Acquired resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}):") # pylint: disable=line-too-long print(indent(pformat(resource.asdict()), prefix=" ")) assert resource.cls == cls @@ -747,7 +751,11 @@ def get_target_resources(self, place): name = resource_name if match.rename: name = match.rename - resources[(name, cls)] = self.resources[exporter][group_name][resource_name] + try: + resources[(name, cls)] = self.resources[exporter][group_name][resource_name] + except KeyError: + raise ServerError(f"place {place} has an orphaned resource (exporter {exporter} disconnected?)") + return resources def get_target_config(self, place): From fc7b8022b64b77c83bb123964c78971657cfca60 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 15:44:13 +0200 Subject: [PATCH 07/38] remote/coordinator: refactor handling of orphaned resources If an exporter disconnects, corresponding resources that are acquired become orphaned. If the exporter reconnects, the resources are not locked. 
Requiring the user to unlock and lock the corresponding place again is inconvenient and would change the previous behavior. Prior to this commit, reacquiring did not work due to a logic error: ExporterCommand.complete() and ExporterCommand.wait() are both called in ExporterStream.request_task(). The blocking wait() prevents further processing of exporter messages. That also means responses for ExporterSetAcquiredRequest are not handled anymore. This leads to a state where resources cannot be acquired/released by their place anymore. Ultimately, this leads to an inconsistent state requiring a coordinator restart. Refactor handling of these orphaned resources to solve this. Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 97 ++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 36 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 80a9a6832..961ea4ea9 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -54,7 +54,7 @@ def set_resource(self, groupname, resourcename, resource): """This is called when Exporters update resources or when they disconnect.""" logging.info("set_resource %s %s %s", groupname, resourcename, resource) group = self.groups.setdefault(groupname, {}) - old = group.get(resourcename) + old: ResourceImport = group.get(resourcename) if resource is not None: new = ResourceImport( data=ResourceImport.data_from_pb2(resource), path=(self.name, groupname, resource.cls, resourcename) @@ -66,6 +66,8 @@ def set_resource(self, groupname, resourcename, resource): group[resourcename] = new else: new = None + if old.acquired: + old.orphaned = True try: del group[resourcename] except KeyError: @@ -150,6 +152,7 @@ class ResourceImport(ResourceEntry): """ path = attr.ib(kw_only=True, validator=attr.validators.instance_of(tuple)) + orphaned = attr.ib(init=False, default=False, validator=attr.validators.instance_of(bool)) def locked(func): @@ -181,7 +184,7 @@ class 
ExporterError(Exception): class Coordinator(labgrid_coordinator_pb2_grpc.CoordinatorServicer): def __init__(self) -> None: - self.places = {} + self.places: dict[str, Place] = {} self.reservations = {} self.poll_task = None self.save_scheduled = False @@ -200,6 +203,12 @@ async def _poll_step(self): await self.save() except Exception: # pylint: disable=broad-except traceback.print_exc() + # try to re-acquire orphaned resources + try: + async with self.lock: + await self._reacquire_orphaned_resources() + except Exception: # pylint: disable=broad-except + traceback.print_exc() # update reservations try: self.schedule_reservations() @@ -334,34 +343,6 @@ def get_exporter_by_name(self, name): if exporter.name == name: return exporter - async def _update_acquired_places(self, action, resource): - """Update acquired places when resources are added or removed.""" - if action not in [Action.ADD, Action.DEL]: - return # currently nothing needed for Action.UPD - - # collect affected places - places = [] - for place in self.places.values(): - if not place.acquired: - continue - if not place.hasmatch(resource.path): - continue - places.append(place) - - if action is Action.ADD: - # only add if there is no conflict - if len(places) != 1: - return - place = places[0] - await self._acquire_resources(place, [resource]) - self._publish_place(place) - else: - for place in places: - # resources only disappear when exporters disconnect, so we - # can't call back to the exporter - await self._release_resources(place, [resource], callback=False) - self._publish_place(place) - def _publish_place(self, place): msg = labgrid_coordinator_pb2.ClientOutMessage() msg.updates.add().place.CopyFrom(place.as_pb2()) @@ -411,15 +392,12 @@ async def request_task(): logging.debug("Received startup from %s with %s", name, version) elif kind == "resource": logging.debug("Received resource from %s with %s", name, in_msg.resource) - action, resource = session.set_resource( + action, _ = 
session.set_resource( in_msg.resource.path.group_name, in_msg.resource.path.resource_name, in_msg.resource ) if action is Action.ADD: async with self.lock: self._add_default_place(in_msg.resource.path.group_name) - if action in (Action.ADD, Action.DEL): - async with self.lock: - await self._update_acquired_places(action, resource) self.save_later() else: logging.warning("received unknown kind %s from exporter %s (version %s)", kind, name, version) @@ -453,8 +431,7 @@ async def request_task(): for groupname, group in session.groups.items(): for resourcename in group.copy(): - action, resource = session.set_resource(groupname, resourcename, None) - await self._update_acquired_places(action, resource) + session.set_resource(groupname, resourcename, None) logging.debug("exporter aborted %s, cancelled: %s", context.peer(), context.cancelled()) @@ -652,6 +629,8 @@ async def _release_resources(self, place, resources, callback=True): pass for resource in resources: + if resource.orphaned: + continue try: # this triggers an update from the exporter which is published # to the clients @@ -673,6 +652,48 @@ async def _release_resources(self, place, resources, callback=True): except: logging.exception("failed to publish released resource %s", resource) + async def _reacquire_orphaned_resources(self): + assert self.lock.locked() + + for place in self.places.values(): + changed = False + + for idx, resource in enumerate(place.acquired_resources): + if not resource.orphaned: + continue + + # is the exporter connected again? + exporter = self.get_exporter_by_name(resource.path[0]) + if not exporter: + continue + + # does the resource exist again? 
+ try: + new_resource = exporter.groups[resource.path[1]][resource.path[3]] + except KeyError: + continue + + if new_resource.acquired: + # this should only happen when resources become broken + logging.debug("ignoring acquired/broken resource %s for place %s", new_resource, place.name) + continue + + try: + await self._acquire_resource(place, new_resource) + place.acquired_resources[idx] = new_resource + except Exception: + logging.exception( + "failed to reacquire orphaned resource %s for place %s", new_resource, place.name + ) + break + + logging.info("reacquired orphaned resource %s for place %s", new_resource, place.name) + changed = True + + if changed: + self._publish_place(place) + self.save_later() + @locked async def AcquirePlace(self, request, context): peer = context.peer() @@ -693,6 +714,10 @@ async def AcquirePlace(self, request, context): res = self.reservations[place.reservation] if not res.owner == username: await context.abort(grpc.StatusCode.PERMISSION_DENIED, f"Place {name} was not reserved for {username}") + + # First try to reacquire orphaned resources to avoid conflicts. + await self._reacquire_orphaned_resources() + # FIXME use the session object instead? or something else which # survives disconnecting clients? place.acquired = username From bef539e94f0e9023b1654ae324362a1714274c07 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 18:04:38 +0200 Subject: [PATCH 08/38] remote: rework grpc timeout configuration It's not really clear how keepalive_timeout_ms and the ping_timeout_ms experiment should interact, so we set them both. 
Signed-off-by: Jan Luebbe --- labgrid/remote/client.py | 16 +++++++++++++++- labgrid/remote/coordinator.py | 23 +++++++++++++---------- labgrid/remote/exporter.py | 16 +++++++++++++++- 3 files changed, 43 insertions(+), 12 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 8ce447d7a..3541aacb5 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -86,7 +86,21 @@ def __attrs_post_init__(self): """Actions which are executed if a connection is successfully opened.""" self.stopping = asyncio.Event() - self.channel = grpc.aio.insecure_channel(self.address) + # It seems since https://github.com/grpc/grpc/pull/34647, the + # ping_timeout_ms default of 60 seconds overrides keepalive_timeout_ms, + # so set it as well. + # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. + channel_options = [ + ("grpc.keepalive_time_ms", 7500), # 7.5 seconds + ("grpc.keepalive_timeout_ms", 10000), # 10 seconds + ("grpc.http2.ping_timeout_ms", 10000), # 10 seconds + ("grpc.http2.max_pings_without_data", 0), # no limit + ] + + self.channel = grpc.aio.insecure_channel( + target=self.address, + options=channel_options, + ) self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) self.out_queue = asyncio.Queue() diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 961ea4ea9..47c58296b 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -959,17 +959,20 @@ async def GetReservations(self, request: labgrid_coordinator_pb2.GetReservations async def serve(listen, cleanup) -> None: + # It seems since https://github.com/grpc/grpc/pull/34647, the + # ping_timeout_ms default of 60 seconds overrides keepalive_timeout_ms, + # so set it as well. + # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. 
+ channel_options = [ + ("grpc.keepalive_time_ms", 10000), # 10 seconds + ("grpc.keepalive_timeout_ms", 10000), # 10 seconds + ("grpc.http2.ping_timeout_ms", 15000), # 15 seconds + ("grpc.http2.min_ping_interval_without_data_ms", 5000), + ("grpc.http2.max_pings_without_data", 0), # no limit + ("grpc.keepalive_permit_without_calls", 1), # allow keepalive pings even when there are no calls + ] server = grpc.aio.server( - options=[ - ("grpc.keepalive_time_ms", 30000), # Send keepalive ping every 30 seconds - ( - "grpc.keepalive_timeout_ms", - 10000, - ), # Wait 10 seconds for ping ack before considering the connection dead - ("grpc.http2.min_time_between_pings_ms", 15000), # Minimum amount of time between pings - ("grpc.http2.max_pings_without_data", 0), # Allow pings even without active streams - ("grpc.keepalive_permit_without_calls", 1), # Allow keepalive pings even when there are no calls - ], + options=channel_options, ) coordinator = Coordinator() labgrid_coordinator_pb2_grpc.add_CoordinatorServicer_to_server(coordinator, server) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 019d39f96..8187c4e14 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -783,11 +783,25 @@ def __init__(self, config) -> None: self.hostname = config["hostname"] self.isolated = config["isolated"] + # It seems since https://github.com/grpc/grpc/pull/34647, the + # ping_timeout_ms default of 60 seconds overrides keepalive_timeout_ms, + # so set it as well. + # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. 
+ channel_options = [ + ("grpc.keepalive_time_ms", 7500), # 7.5 seconds + ("grpc.keepalive_timeout_ms", 10000), # 10 seconds + ("grpc.http2.ping_timeout_ms", 10000), # 10 seconds + ("grpc.http2.max_pings_without_data", 0), # no limit + ] + # default to port 20408 if not specified if urlsplit(f"//{config['coordinator']}").port is None: config["coordinator"] += ":20408" - self.channel = grpc.aio.insecure_channel(config["coordinator"]) + self.channel = grpc.aio.insecure_channel( + target=config["coordinator"], + options=channel_options, + ) self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) self.out_queue = asyncio.Queue() self.pump_task = None From f32d41833eff192287def83516e4492b8441df44 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 19:50:11 +0200 Subject: [PATCH 09/38] remote/coordinator: disable so_reuseport We only want to run one instance of the coordinator, so enabling so_reuseport can lead to confusing situations where multiple coordinators are running in parallel. Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 47c58296b..f7d97b86a 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -964,6 +964,7 @@ async def serve(listen, cleanup) -> None: # so set it as well. # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. 
channel_options = [ + ("grpc.so_reuseport", 0), # no load balancing ("grpc.keepalive_time_ms", 10000), # 10 seconds ("grpc.keepalive_timeout_ms", 10000), # 10 seconds ("grpc.http2.ping_timeout_ms", 15000), # 15 seconds From 9ad28c334a468603f7fb6815c1792d0bc615935b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 14:47:48 +0200 Subject: [PATCH 10/38] remote/coordinator: store event loop in loop attribute Instead of using asyncio.get_event_loop() in various places in the Coordinator class, store it in an attribute called loop. Signed-off-by: Bastian Krause --- labgrid/remote/coordinator.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index f7d97b86a..635c7c5c9 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -194,7 +194,8 @@ def __init__(self) -> None: self.clients: dict[str, ClientSession] = {} self.load() - self.poll_task = asyncio.get_event_loop().create_task(self.poll()) + self.loop = asyncio.get_event_loop() + self.poll_task = self.loop.create_task(self.poll()) async def _poll_step(self): # save changes @@ -216,8 +217,7 @@ async def _poll_step(self): traceback.print_exc() async def poll(self): - loop = asyncio.get_event_loop() - while not loop.is_closed(): + while not self.loop.is_closed(): try: await asyncio.sleep(15.0) await self._poll_step() @@ -247,11 +247,10 @@ async def save(self): places = yaml.dump(places) places = places.encode() - loop = asyncio.get_event_loop() logging.debug("Awaiting resources") - await loop.run_in_executor(None, atomic_replace, "resources.yaml", resources) + await self.loop.run_in_executor(None, atomic_replace, "resources.yaml", resources) logging.debug("Awaiting places") - await loop.run_in_executor(None, atomic_replace, "places.yaml", places) + await self.loop.run_in_executor(None, atomic_replace, "places.yaml", places) def load(self): try: @@ -310,7 +309,7 @@ async def 
request_task(): except Exception: logging.exception("error in client message handler") - runnning_request_task = asyncio.get_event_loop().create_task(request_task()) + runnning_request_task = self.loop.create_task(request_task()) try: async for out_msg in queue_as_aiter(out_msg_queue): @@ -406,7 +405,7 @@ async def request_task(): except Exception: logging.exception("error in exporter message handler") - runnning_request_task = asyncio.get_event_loop().create_task(request_task()) + runnning_request_task = self.loop.create_task(request_task()) try: async for cmd in queue_as_aiter(command_queue): From 1bd0f99a3a6235050317820dd6b25c421a500406 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 23 Jul 2024 15:14:17 +0200 Subject: [PATCH 11/38] remote/client: drop error check already done by argparser Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 3541aacb5..cec377c54 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -646,8 +646,6 @@ async def add_named_match(self): raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") if "*" in pattern: raise UserError(f"invalid pattern '{pattern}' ('*' not allowed for named matches)") - if not name: - raise UserError(f"invalid name '{name}'") request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern, rename=name) From 5679fa579352b91113de0f3a04db1603e9c828c2 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 8 Aug 2024 15:53:58 +0200 Subject: [PATCH 12/38] remote/client: replace manual place checks with helper method calls Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 5 +---- tests/test_client.py | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index cec377c54..28488fbc8 100755 --- a/labgrid/remote/client.py +++ 
b/labgrid/remote/client.py @@ -669,10 +669,7 @@ def check_matches(self, place): async def acquire(self): """Acquire a place, marking it unavailable for other clients""" - place = self.get_place() - if place.acquired: - raise UserError(f"place {place.name} is already acquired by {place.acquired}") - + place = self.get_idle_place() if not self.args.allow_unmatched: self.check_matches(place) diff --git a/tests/test_client.py b/tests/test_client.py index d82c0a8ce..14b855d1e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -262,7 +262,7 @@ def test_place_add_no_name(coordinator): def test_place_del_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client delete') as spawn: - spawn.expect("name was not a string") + spawn.expect("place pattern not specified") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() From 42305915e79722e2c59914ba8d3a504afef8d8cb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 23 Jul 2024 14:10:47 +0200 Subject: [PATCH 13/38] remote/client: drop redundant checks These checks are already performed by get_idle_place(). 
Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 28488fbc8..bd3526c17 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -597,8 +597,6 @@ async def add_match(self): """Add a match for a place, making fuzzy matching available to the client""" place = self.get_idle_place() - if place.acquired: - raise UserError(f"can not change acquired place {place.name}") for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") @@ -617,8 +615,6 @@ async def add_match(self): async def del_match(self): """Delete a match for a place""" place = self.get_idle_place() - if place.acquired: - raise UserError(f"can not change acquired place {place.name}") for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") @@ -638,8 +634,6 @@ async def add_named_match(self): Fuzzy matching is not allowed to avoid accidental names conflicts.""" place = self.get_idle_place() - if place.acquired: - raise UserError(f"can not change acquired place {place.name}") pattern = self.args.pattern name = self.args.name if not 2 <= pattern.count("/") <= 3: From 624667c39740d9183ff7383e0fc9d1da8f47e8fb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 23 Jul 2024 14:12:02 +0200 Subject: [PATCH 14/38] remote/client: be more explicit about expected place acquired state in allow()/release_from() Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index bd3526c17..63fdfa7af 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -719,6 +719,8 @@ async def release(self): async def release_from(self): """Release a place, but only if 
acquired by a specific user""" place = self.get_place() + if not place.acquired: + raise UserError(f"place {place.name} is not acquired") request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name, fromuser=self.args.acquired) @@ -732,7 +734,7 @@ async def release_from(self): async def allow(self): """Allow another use access to a previously acquired place""" - place = self.get_place() + place = self.get_acquired_place() if "/" not in self.args.user: raise UserError(f"user {self.args.user} must be in / format") request = labgrid_coordinator_pb2.AllowPlaceRequest(placename=place.name, user=self.args.user) From da95c4446416205a26bf30d3d3979f6c9a7a0ad5 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 15:18:16 +0200 Subject: [PATCH 15/38] remote: use more explicit event loop handling Calling asyncio.get_event_loop() with no current event loop is deprecated since Python 3.10 and will be an error in some future Python release [1]. Whenever we don't expect to run with an event loop, create one explicitly. In coroutine and callbacks from asynchronous code, use the more explicit asyncio.get_running_loop() to get the loop. Note that this does not work in labgrid.resources.ethernetport.EthernetPortManager: This code is usually not called in coroutines and callbacks from asynchronous code, so asyncio.get_running_loop() does not work there. So stick to asyncio.get_event_loop() there and just expect to be called with a running event loop (which is the non-deprecated use case for this function). Users that do not have an event loop running will see a justified DeprecationWarning with Python >= 3.12 and an error in some future Python version. 
[1] https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_event_loop Signed-off-by: Bastian Krause --- labgrid/remote/coordinator.py | 6 ++++-- labgrid/remote/exporter.py | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 635c7c5c9..97ed16d26 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -194,7 +194,7 @@ def __init__(self) -> None: self.clients: dict[str, ClientSession] = {} self.load() - self.loop = asyncio.get_event_loop() + self.loop = asyncio.get_running_loop() self.poll_task = self.loop.create_task(self.poll()) async def _poll_step(self): @@ -1025,7 +1025,9 @@ def main(): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) - loop = asyncio.get_event_loop() + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + cleanup = [] loop.set_debug(True) try: diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 8187c4e14..86a261c92 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -778,7 +778,7 @@ def __init__(self, config) -> None: - Setup loop, name, authid and address - Join the coordinator as an exporter""" self.config = config - self.loop = asyncio.get_event_loop() + self.loop = asyncio.get_running_loop() self.name = config["name"] self.hostname = config["hostname"] self.isolated = config["isolated"] @@ -1061,6 +1061,9 @@ def main(): print(f"exporter hostname: {config['hostname']}") print(f"resource config file: {config['resources']}") + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + asyncio.run(amain(config), debug=bool(args.debug)) if reexec: From 0e1e4fbaccec3c1c27b6314bdb3ab3eafa86e490 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 15:18:57 +0200 Subject: [PATCH 16/38] remote/client: rework event loop handling in start_session() Calling asyncio.get_event_loop() with no current event loop is deprecated since 
Python 3.10 and will be an error in some future Python release [1]. Using it in labgrid.remote.client.start_session() causes errors in IPython when using a RemotePlace: In [1]: from labgrid.resource.remote import RemotePlace ...: from labgrid import Target ...: ...: target = Target("example") ...: RemotePlace(target, name="example-place") [...] RuntimeError: There is no current event loop in thread 'MainThread'. For labgrid.remote.client.start_session() there is no reliable way of retrieving the thread's event loop without being called from an async context (which we cannot assume here). Instead of using asyncio.get_event_loop(), use a new helper function ensure_event_loop() that returns the first available loop instance from: - externally provided event loop - stashed event loop - OS thread's running event loop (when called from async code) - new event loop The returned loop is stashed for future calls. See also [2] for a similar approach. start_session() now accepts a new optional argument "loop" for providing an external event loop. 
[1] https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_event_loop [2] https://github.com/jupyter/jupyter_core/pull/387 Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 46 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 63fdfa7af..6924090f5 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -4,6 +4,7 @@ import argparse import asyncio import contextlib +from contextvars import ContextVar import enum import os import pathlib @@ -1529,8 +1530,45 @@ def print_version(self): print(labgrid_version()) -def start_session(address, extra, debug=False): - loop = asyncio.get_event_loop() +_loop: ContextVar["asyncio.AbstractEventLoop | None"] = ContextVar("_loop", default=None) + + +def ensure_event_loop(external_loop=None): + """Get the event loop for this thread, or create a new event loop.""" + # get stashed loop + loop = _loop.get() + + # ignore closed stashed loop + if loop and loop.is_closed(): + loop = None + + if external_loop: + # if a loop is stashed, expect it to be the same as the external one + if loop: + assert loop is external_loop + _loop.set(external_loop) + return external_loop + + # return stashed loop + if loop: + return loop + + try: + # if called from async code, try to get current's thread loop + loop = asyncio.get_running_loop() + except RuntimeError: + # no previous, external or running loop found, create a new one + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + # stash it + _loop.set(loop) + return loop + + +def start_session(address, extra, debug=False, loop=None): + loop = ensure_event_loop(loop) + if debug: loop.set_debug(True) @@ -2040,7 +2078,9 @@ def main(): coordinator_address = os.environ.get("LG_COORDINATOR", "127.0.0.1:20408") logging.debug('Starting session with "%s"', coordinator_address) - session = start_session(coordinator_address, extra, args.debug) + loop 
= asyncio.new_event_loop() + asyncio.set_event_loop(loop) + session = start_session(coordinator_address, extra=extra, debug=args.debug, loop=loop) logging.debug("Started session") try: From 545d240ec3bd48916c1119c0f4b88b4b9ac6551d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 15:54:47 +0200 Subject: [PATCH 17/38] remote/client: provide default for ClientSession.env to make it optional The Environment was always optional. Before users of ClientSession had to pass an explicit None for this attribute. While we're changing the ClientSession for gRPC anyway, let's make env really optional. This again allows us to make start_session()'s extra argument optional, too. It is used to pass extra arguments to the ClientSession, which means it can be an empty dictionary now. Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 6924090f5..731e81950 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -71,7 +71,7 @@ class ClientSession: address = attr.ib(validator=attr.validators.instance_of(str)) loop = attr.ib(validator=attr.validators.instance_of(asyncio.BaseEventLoop)) - env = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Environment))) + env = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(Environment))) role = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) prog = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) args = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(argparse.Namespace))) @@ -1566,9 +1566,12 @@ def ensure_event_loop(external_loop=None): return loop -def start_session(address, extra, debug=False, loop=None): +def start_session(address, extra=None, debug=False, loop=None): loop = 
ensure_event_loop(loop) + if extra is None: + extra = {} + if debug: loop.set_debug(True) From 37a92b5a86371dab28c3e5ef93146af0cf13ecb7 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 15:55:40 +0200 Subject: [PATCH 18/38] remote: make start_session() args more explicit During the move to gRPC, start_session()'s arguments changed. Since this is one of the functions used from outside of labgrid, add typing hints and force the kwargs to be passed with names. This should make users aware of the changes, so their code can be adapted. Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 15 ++++++++++++++- labgrid/resource/remote.py | 2 +- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 731e81950..5ab4f0683 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -23,6 +23,7 @@ from collections import defaultdict, OrderedDict from datetime import datetime from pprint import pformat +from typing import Any, Dict import attr import grpc @@ -1566,7 +1567,19 @@ def ensure_event_loop(external_loop=None): return loop -def start_session(address, extra=None, debug=False, loop=None): +def start_session( + address: str, *, extra: Dict[str, Any] = None, debug: bool = False, loop: "asyncio.AbstractEventLoop | None" = None +): + """ + Starts a ClientSession. 
+ + Args: + address: coordinator address as HOST[:PORT], PORT defaults to 20408 + extra: additional kwargs for ClientSession + debug: set debug mode of the event loop + loop: explicit event loop to use (otherwise a previously stashed loop, + if retrievable the current thread's loop or a new loop is used) + """ loop = ensure_event_loop(loop) if extra is None: diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py index 1b8256ef0..b8adb2524 100644 --- a/labgrid/resource/remote.py +++ b/labgrid/resource/remote.py @@ -22,7 +22,7 @@ def _start(self): from ..remote.client import start_session try: - self.session = start_session(self.url, {'env': self.env}) + self.session = start_session(self.url, extra={'env': self.env}) except ConnectionRefusedError as e: raise ConnectionRefusedError(f"Could not connect to coordinator {self.url}") \ from e From 606af353d5621c0bd0a7f19bdac7182f318acb32 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 31 Jul 2024 18:30:34 +0200 Subject: [PATCH 19/38] tests/test_ethernetport: create and set event loop for test Calling asyncio.get_event_loop() with no current event loop is deprecated since Python 3.10 and will be an error in some future Python release [1]. SNMPEthernetPort expects a running event loop. So create and set one. 
[1] https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_event_loop Signed-off-by: Bastian Krause --- tests/test_ethernetport.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/test_ethernetport.py b/tests/test_ethernetport.py index bea4b8ad7..55dbac6b8 100644 --- a/tests/test_ethernetport.py +++ b/tests/test_ethernetport.py @@ -1,6 +1,15 @@ +import asyncio + from labgrid.resource import SNMPEthernetPort def test_instance(target): - s = SNMPEthernetPort(target, 'port-1', switch='dummy-switch', interface='1') - assert (isinstance(s, SNMPEthernetPort)) + # SNMPEthernetPort should be called with a running event loop + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + try: + s = SNMPEthernetPort(target, 'port-1', switch='dummy-switch', interface='1') + assert (isinstance(s, SNMPEthernetPort)) + finally: + loop.close() From cb9ecde9e03737ed90cf3432099ede738b63cb5e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 2 Jul 2024 11:15:41 +0200 Subject: [PATCH 20/38] tests/conftest: turn exporter fixture into class This allows stopping and starting the exporter during a test. More functionality will be moved into the class in future commits. 
Signed-off-by: Bastian Krause --- tests/conftest.py | 93 +++++++++++++++++++++++++++----------------- tests/test_client.py | 13 ++----- 2 files changed, 61 insertions(+), 45 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 164914c31..b120782f6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -60,6 +60,54 @@ def __getattr__(self, name): return getattr(self.__wrapped, name) +class Exporter: + def __init__(self, config, cwd): + self.cwd = str(cwd) + self.config = config + self.spawn = None + self.reader = None + + def start(self): + assert self.spawn is None + assert self.reader is None + + self.spawn = pexpect.spawn( + f'{sys.executable} -m labgrid.remote.exporter --name testhost {self.config}', + logfile=Prefixer(sys.stdout.buffer, 'exporter'), + cwd=self.cwd) + try: + self.spawn.expect('exporter name: testhost') + self.spawn.expect('connected to exporter') + except Exception as e: + raise Exception(f"exporter startup failed with {self.spawn.before}") from e + + self.reader = threading.Thread( + target=keep_reading, + name=f'exporter-reader-{self.pid}', + args=(self.spawn,), daemon=True) + self.reader.start() + + def stop(self): + logging.info("stopping exporter pid=%s", self.spawn.pid) + self.spawn.close(force=True) + assert not self.spawn.isalive() + self.reader.join() + + self.spawn = None + self.reader = None + + def isalive(self): + return self.spawn.isalive() + + @property + def exitstatus(self): + return self.spawn.exitstatus + + @property + def pid(self): + return self.spawn.pid + + @pytest.fixture(scope='function') def target(): return Target('Test') @@ -123,16 +171,9 @@ def coordinator(tmpdir): reader.join() @pytest.fixture(scope='function') -def exporter(tmpdir, coordinator, start_exporter): - yield start_exporter() - - -@pytest.fixture(scope='function') -def start_exporter(tmpdir, coordinator): - spawns = [] - readers = [] - - p = tmpdir.join("exports.yaml") +def exporter(tmpdir, coordinator): + config = "exports.yaml" + p = 
tmpdir.join(config) p.write( """ Testport: @@ -152,32 +193,12 @@ def start_exporter(tmpdir, coordinator): """ ) - def _start_exporter(): - spawn = pexpect.spawn( - f'{sys.executable} -m labgrid.remote.exporter --name testhost exports.yaml', - logfile=Prefixer(sys.stdout.buffer, 'exporter'), - cwd=str(tmpdir)) - try: - spawn.expect('exporter name: testhost') - spawn.expect('connected to exporter') - except: - print(f"exporter startup failed with {spawn.before}") - raise - reader = threading.Thread(target=keep_reading, name=f'exporter-reader-{spawn.pid}', args=(spawn,), daemon=True) - reader.start() - - spawns.append(spawn) - readers.append(reader) - - return spawn - - yield _start_exporter - - for spawn, reader in zip(spawns, readers): - print(f"stopping exporter pid={spawn.pid}") - spawn.close(force=True) - assert not spawn.isalive() - reader.join() + exporter = Exporter(config, tmpdir) + exporter.start() + + yield exporter + + exporter.stop() def pytest_addoption(parser): parser.addoption("--sigrok-usb", action="store_true", diff --git a/tests/test_client.py b/tests/test_client.py index 14b855d1e..8f397f56f 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -393,14 +393,12 @@ def test_reservation(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -def test_resource_acquired_state_on_exporter_restart(monkeypatch, place,start_exporter): +def test_resource_acquired_state_on_exporter_restart(monkeypatch, place, exporter): user = "test-user" host = "test-host" monkeypatch.setenv("LG_USERNAME", user) monkeypatch.setenv("LG_HOSTNAME", host) - exporter = start_exporter() - # add resource match with pexpect.spawn('python -m labgrid.remote.client -p test add-match testhost/Testport/NetworkSerialPort') as spawn: spawn.expect(pexpect.EOF) @@ -433,12 +431,9 @@ def test_resource_acquired_state_on_exporter_restart(monkeypatch, place,start_ex assert spawn.exitstatus == 0, spawn.before.strip() assert b"Resource 'NetworkSerialPort' 
(testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': 'test'," in spawn.before - # stop exporter - exporter.close() - assert not exporter.isalive() - - # start exporter again - exporter = start_exporter() + # restart exporter + exporter.stop() + exporter.start() # make sure matching resource is still found with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: From 373874bac00d817fbde68f354096dbc8cfb4c199 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 16:21:43 +0200 Subject: [PATCH 21/38] tests/conftest: move common Exporter functionality into generic LabgridComponent Most functionality of this class is not exporter- or coordinator-specific, so move the common parts into a common class. Both Exporter (and a future Coordinator class) will inherit it. Signed-off-by: Bastian Krause --- tests/conftest.py | 91 ++++++++++++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index b120782f6..497eb2ee6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,20 +21,6 @@ def curses_init(): except ModuleNotFoundError: logging.warning("curses module not found, not setting up a default terminal – tests may fail") -def keep_reading(spawn): - "The output from background processes must be read to avoid blocking them."
- while spawn.isalive(): - try: - data = spawn.read_nonblocking(size=1024, timeout=0.1) - if not data: - return - except pexpect.TIMEOUT: - continue - except pexpect.EOF: - return - except OSError: - return - class Prefixer: def __init__(self, wrapped, prefix): @@ -60,40 +46,51 @@ def __getattr__(self, name): return getattr(self.__wrapped, name) -class Exporter: - def __init__(self, config, cwd): +class LabgridComponent: + def __init__(self, cwd): self.cwd = str(cwd) - self.config = config self.spawn = None self.reader = None - def start(self): - assert self.spawn is None - assert self.reader is None + def stop(self): + logging.info("stopping {self.__class__.__name__} pid=%s", self.spawn.pid) + + # let coverage write its data: + # https://coverage.readthedocs.io/en/latest/subprocess.html#process-termination + self.spawn.kill(SIGTERM) + if not self.spawn.closed: + self.spawn.expect(pexpect.EOF) + self.spawn.wait() + assert not self.spawn.isalive() - self.spawn = pexpect.spawn( - f'{sys.executable} -m labgrid.remote.exporter --name testhost {self.config}', - logfile=Prefixer(sys.stdout.buffer, 'exporter'), - cwd=self.cwd) - try: - self.spawn.expect('exporter name: testhost') - self.spawn.expect('connected to exporter') - except Exception as e: - raise Exception(f"exporter startup failed with {self.spawn.before}") from e + self.spawn = None + self.stop_reader() + + @staticmethod + def keep_reading(spawn): + "The output from background processes must be read to avoid blocking them." 
+ while spawn.isalive(): + try: + data = spawn.read_nonblocking(size=1024, timeout=0.1) + if not data: + return + except pexpect.TIMEOUT: + continue + except pexpect.EOF: + return + except OSError: + return + def start_reader(self): self.reader = threading.Thread( - target=keep_reading, - name=f'exporter-reader-{self.pid}', + target=LabgridComponent.keep_reading, + name=f'{self.__class__.__name__}-reader-{self.pid}', args=(self.spawn,), daemon=True) self.reader.start() - def stop(self): - logging.info("stopping exporter pid=%s", self.spawn.pid) - self.spawn.close(force=True) - assert not self.spawn.isalive() + def stop_reader(self): self.reader.join() - self.spawn = None self.reader = None def isalive(self): @@ -108,6 +105,28 @@ def pid(self): return self.spawn.pid +class Exporter(LabgridComponent): + def __init__(self, config, cwd): + super().__init__(cwd) + self.config = config + + def start(self): + assert self.spawn is None + assert self.reader is None + + self.spawn = pexpect.spawn( + f'labgrid-exporter --name testhost {self.config}', + logfile=Prefixer(sys.stdout.buffer, 'exporter'), + cwd=self.cwd) + try: + self.spawn.expect('exporter name: testhost') + self.spawn.expect('connected to exporter') + except Exception as e: + raise Exception(f"exporter startup failed with {self.spawn.before}") from e + + self.start_reader() + + @pytest.fixture(scope='function') def target(): return Target('Test') From 15da271b45a890abe33c226c0ed804e837e64117 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 16:22:48 +0200 Subject: [PATCH 22/38] tests/conftest: make coordinator fixture use a helper class Coordinator The same was previously implemented for the exporter. Since we need the same functionality also for the coordinator (along with suspend_tree()/resume_tree() functionality to be moved in a future commit), let's refactor it now. 
Signed-off-by: Bastian Krause --- tests/conftest.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 497eb2ee6..08f630c59 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -127,6 +127,23 @@ def start(self): self.start_reader() +class Coordinator(LabgridComponent): + def start(self): + assert self.spawn is None + assert self.reader is None + + self.spawn = pexpect.spawn( + 'labgrid-coordinator', + logfile=Prefixer(sys.stdout.buffer, 'coordinator'), + cwd=self.cwd) + try: + self.spawn.expect('Coordinator ready') + except Exception as e: + raise Exception(f"coordinator startup failed with {self.spawn.before}") from e + + self.start_reader() + + @pytest.fixture(scope='function') def target(): return Target('Test') @@ -166,28 +183,12 @@ def serial_driver_no_name(target, serial_port, mocker): @pytest.fixture(scope='function') def coordinator(tmpdir): + coordinator = Coordinator(tmpdir) + coordinator.start() - spawn = pexpect.spawn( - 'labgrid-coordinator', - logfile=Prefixer(sys.stdout.buffer, 'coordinator'), - cwd=str(tmpdir)) - try: - spawn.expect('Coordinator ready') - except: - print(f"coordinator startup failed with {spawn.before}") - raise - reader = threading.Thread(target=keep_reading, name='coordinator-reader', args=(spawn,), daemon=True) - reader.start() - yield spawn - - # let coverage write its data: - # https://coverage.readthedocs.io/en/latest/subprocess.html#process-termination - print("stopping coordinator") - spawn.kill(SIGTERM) - spawn.expect(pexpect.EOF) - spawn.wait() - - reader.join() + yield coordinator + + coordinator.stop() @pytest.fixture(scope='function') def exporter(tmpdir, coordinator): From 5b2776d0e699a81abaafab8ab12af9c4299c79af Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 16:24:02 +0200 Subject: [PATCH 23/38] tests/conftest: move suspend_tree/resume_tree into LabgridComponent Previously only 
tests/test_client.py could use the suspend_tree() and resume_tree() functionality. Since this is also useful for exporter + coordinator tests, move it into the generic LabgridComponent class. Signed-off-by: Bastian Krause --- tests/conftest.py | 14 ++++++++++++++ tests/test_client.py | 22 ++++------------------ tests/test_coordinator.py | 2 -- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 08f630c59..b56b212fa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,8 @@ from labgrid.resource import RawSerialPort, NetworkSerialPort from labgrid.driver.fake import FakeConsoleDriver +psutil = pytest.importorskip("psutil") + @pytest.fixture(scope="session") def curses_init(): """ curses only reads the terminfo DB once on the first import, so make @@ -104,6 +106,18 @@ def exitstatus(self): def pid(self): return self.spawn.pid + def suspend_tree(self): + main = psutil.Process(self.pid) + main.suspend() + for child in main.children(recursive=True): + child.suspend() + + def resume_tree(self): + main = psutil.Process(self.pid) + main.resume() + for child in main.children(recursive=True): + child.resume() + class Exporter(LabgridComponent): def __init__(self, config, cwd): diff --git a/tests/test_client.py b/tests/test_client.py index 8f397f56f..5a1fc1a74 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -5,20 +5,6 @@ import pytest import pexpect -psutil = pytest.importorskip("psutil") - -def suspend_tree(pid): - main = psutil.Process(pid) - main.suspend() - for child in main.children(recursive=True): - child.suspend() - -def resume_tree(pid): - main = psutil.Process(pid) - main.resume() - for child in main.children(recursive=True): - child.resume() - def test_startup(coordinator): pass @@ -68,7 +54,7 @@ def test_connect_error(): assert spawn.exitstatus == 1, spawn.before.strip() def test_connect_timeout(coordinator): - suspend_tree(coordinator.pid) + coordinator.suspend_tree() try: with 
pexpect.spawn('python -m labgrid.remote.client places') as spawn: spawn.expect("connection attempt timed out before receiving SETTINGS frame") @@ -76,7 +62,7 @@ def test_connect_timeout(coordinator): spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() finally: - resume_tree(coordinator.pid) + coordinator.resume_tree() pass def test_place_show(place): @@ -488,7 +474,7 @@ def test_exporter_timeout(place, exporter): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() - suspend_tree(exporter.pid) + exporter.suspend_tree() try: time.sleep(30) @@ -499,7 +485,7 @@ def test_exporter_timeout(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() assert b'/Testport/NetworkSerialPort' not in spawn.before finally: - resume_tree(exporter.pid) + exporter.resume_tree() # the exporter should quit by itself now time.sleep(5) diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py index 27c04969b..dbbefa33a 100644 --- a/tests/test_coordinator.py +++ b/tests/test_coordinator.py @@ -4,8 +4,6 @@ import labgrid.remote.generated.labgrid_coordinator_pb2_grpc as labgrid_coordinator_pb2_grpc import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 -psutil = pytest.importorskip("psutil") - @pytest.fixture(scope='function') def channel_stub(): import queue From df8df522b152d81ed95bebd224901a71553bbdce Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 12:59:21 +0200 Subject: [PATCH 24/38] tests/test_remote: add test_exporter_start_coordinator_unreachable Previously the exporter had blocking issues when the coordinator was not available. Add a test to prevent future regressions. 
Signed-off-by: Bastian Krause --- tests/test_remote.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tests/test_remote.py b/tests/test_remote.py index 80f54430e..b07b5b170 100644 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -15,3 +15,23 @@ def test_exporter_help(): spawn.close() assert spawn.exitstatus == 0 assert spawn.signalstatus is None + +def test_exporter_start_coordinator_unreachable(monkeypatch, tmpdir): + monkeypatch.setenv("LG_COORDINATOR", "coordinator.invalid") + + config = "exports.yaml" + p = tmpdir.join(config) + p.write( + """ + Testport: + NetworkSerialPort: + host: 'localhost' + port: 4000 + """ + ) + + with pexpect.spawn(f"python -m labgrid.remote.exporter {config}", cwd=tmpdir) as spawn: + spawn.expect("coordinator is unavailable", timeout=10) + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 100, spawn.before From 8462aebc046c401bcab4ee167b19fb20aa4dbd8e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 16:24:56 +0200 Subject: [PATCH 25/38] tests/test_remote: add test_exporter_coordinator_becomes_unreachable A previous commit added a test for exporter startup with an unreachable coordinator. Now also add a test simulating a disappearing coordinator during operation. The exporter should notice the coordinator disappearing and should exit with exitcode 100. This way systemd can try restarting the exporter regularly until the coordinator is available again.
Signed-off-by: Bastian Krause --- tests/test_remote.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_remote.py b/tests/test_remote.py index b07b5b170..ca09908ec 100644 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -35,3 +35,12 @@ def test_exporter_start_coordinator_unreachable(monkeypatch, tmpdir): spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 100, spawn.before + +def test_exporter_coordinator_becomes_unreachable(coordinator, exporter): + coordinator.suspend_tree() + + exporter.spawn.expect(pexpect.EOF, timeout=30) + exporter.spawn.close() + assert exporter.exitstatus == 100 + + coordinator.resume_tree() From 61516b6c28361726424fde2c31142250b247924a Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 7 Aug 2024 15:48:59 +0200 Subject: [PATCH 26/38] contrib/systemd: update service for new versions of coordinator/exporter Signed-off-by: Rouven Czerwinski Signed-off-by: Bastian Krause --- contrib/systemd/labgrid-coordinator.service | 6 +++--- contrib/systemd/labgrid-exporter.service | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/systemd/labgrid-coordinator.service b/contrib/systemd/labgrid-coordinator.service index 2dc5d117e..c701038c6 100644 --- a/contrib/systemd/labgrid-coordinator.service +++ b/contrib/systemd/labgrid-coordinator.service @@ -4,12 +4,12 @@ After=network.target [Service] Environment="PYTHONUNBUFFERED=1" -# labgrid's .crossbar/config-anonymous.yaml serves as an example -ExecStart=/path/to/labgrid-coordinator/venv/bin/crossbar start --logformat=syslogd --cbdir /var/lib/labgrid-coordinator --config /etc/labgrid/coordinator.yaml -ExecStop=/usr/bin/labgrid-coordinator stop --cbdir /var/lib/labgrid-coordinator +ExecStart=/path/to/labgrid/venv/bin/labgrid-coordinator Restart=on-failure DynamicUser=yes StateDirectory=labgrid-coordinator +# Set WorkingDirectory to StateDirectory, this works in DynamicUser mode since symlinks are created 
+WorkingDirectory=%S/labgrid-coordinator [Install] WantedBy=multi-user.target diff --git a/contrib/systemd/labgrid-exporter.service b/contrib/systemd/labgrid-exporter.service index a896aeeae..10cbfff26 100644 --- a/contrib/systemd/labgrid-exporter.service +++ b/contrib/systemd/labgrid-exporter.service @@ -5,7 +5,7 @@ Wants=network-online.target [Service] Environment="PYTHONUNBUFFERED=1" -# Should contain LG_CROSSBAR configuration +# Should contain LG_COORDINATOR configuration EnvironmentFile=-/etc/environment ExecStart=/path/to/labgrid/venv/bin/labgrid-exporter /etc/labgrid/exporter.yaml Restart=on-failure From 394864eecad91d3465cad799dda82d84e6a6ac7e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 15:58:17 +0200 Subject: [PATCH 27/38] contrib/coordinator-statsd: migrate to gRPC Signed-off-by: Bastian Krause --- contrib/coordinator-statsd.py | 45 +++++++++++++++++------------------ 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/contrib/coordinator-statsd.py b/contrib/coordinator-statsd.py index cdf6c6c73..f8ea0254b 100755 --- a/contrib/coordinator-statsd.py +++ b/contrib/coordinator-statsd.py @@ -42,12 +42,14 @@ import sys import argparse -import statsd import os -import labgrid.remote.client import time import asyncio -import txaio + +from labgrid.remote.client import start_session, Error +from labgrid.remote.generated import labgrid_coordinator_pb2 +from labgrid.remote.common import Reservation +import statsd def inc_gauge(gauges, key): @@ -56,12 +58,13 @@ def inc_gauge(gauges, key): async def report_reservations(session, tags, gauges): - reservations = await session.call("org.labgrid.coordinator.get_reservations") + request = labgrid_coordinator_pb2.GetReservationsRequest() - for token, config in reservations.items(): - state = config["state"] + response = await session.stub.GetReservations(request) + reservations = [Reservation.from_pb2(x) for x in response.reservations] - groups = config.get("filters", {}) + for reservation 
in reservations: + groups = reservation.filters if not groups: groups = {"": {}} @@ -72,7 +75,7 @@ async def report_reservations(session, tags, gauges): ".".join( ["reservations", group_name] + [group.get(t, "") for t in tags] - + [state] + + [reservation.state.name] ), ) @@ -94,10 +97,10 @@ def main(): ) parser.add_argument( "-x", - "--crossbar", - metavar="URL", - help="Crossbar URL for the coordinator", - default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), + "--coordinator", + metavar="ADDRESS", + help="Coordinator address as HOST[:PORT]. Default is %(default)s", + default=os.environ.get("LG_COORDINATOR", "127.0.0.1:20408"), ) parser.add_argument( "--period", @@ -142,8 +145,8 @@ def main(): args = parser.parse_args() - txaio.use_asyncio() - txaio.config.loop = asyncio.get_event_loop() + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) statsd_client = None gauges = {} @@ -175,22 +178,18 @@ def main(): next_time = time.monotonic() + args.period try: - extra = {} - session = labgrid.remote.client.start_session( - args.crossbar, - os.environ.get("LG_CROSSBAR_REALM", "realm1"), - extra, - ) + session = start_session(args.coordinator, loop=loop) try: - session.loop.run_until_complete( + loop.run_until_complete( asyncio.gather( report_places(session, args.tags, gauges), report_reservations(session, args.tags, gauges), ) ) finally: - session.leave() - except labgrid.remote.client.Error as e: + loop.run_until_complete(session.stop()) + loop.run_until_complete(session.close()) + except Error as e: print(f"Error communicating with labgrid: {e}") continue From b589897a56b491c168774561160f06d227a97382 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 16:08:39 +0200 Subject: [PATCH 28/38] contrib/labgrid-webapp: migrate to gRPC Signed-off-by: Bastian Krause --- contrib/README.rst | 6 +++--- contrib/labgrid-webapp | 23 ++++++++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/contrib/README.rst 
b/contrib/README.rst index 5e537d850..d770cdebd 100644 --- a/contrib/README.rst +++ b/contrib/README.rst @@ -13,14 +13,14 @@ Quick Start $ source venv/bin/activate venv $ pip install -r contrib/requirements-webapp.txt venv $ ./contrib/labgrid-webapp --help - usage: labgrid-webapp [-h] [--crossbar URL] [--port PORT] [--proxy PROXY] + usage: labgrid-webapp [-h] [--coordinator ADDRESS] [--port PORT] [--proxy PROXY] Labgrid webapp options: -h, --help show this help message and exit - --crossbar URL, -x URL - Crossbar websocket URL (default: ws://127.0.0.1:20408/ws) + --coordinator ADDRESS, -x ADDRESS + Coordinator address as HOST[:PORT] (default: 127.0.0.1:20408) --port PORT Port to serve on --proxy PROXY, -P PROXY diff --git a/contrib/labgrid-webapp b/contrib/labgrid-webapp index bd4a22178..e78976ad1 100755 --- a/contrib/labgrid-webapp +++ b/contrib/labgrid-webapp @@ -1,5 +1,6 @@ #!/usr/bin/env python3 import argparse +import asyncio import logging import os import sys @@ -118,11 +119,11 @@ def main(): formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( - '--crossbar', + '--coordinator', '-x', - metavar='URL', + metavar='ADDRESS', - default=os.environ.get('LG_CROSSBAR', 'ws://127.0.0.1:20408/ws'), - help='Crossbar websocket URL (default: %(default)s)', + default=os.environ.get('LG_COORDINATOR', '127.0.0.1:20408'), + help='Coordinator address as HOST[:PORT] (default: %(default)s)', ) parser.add_argument('--port', type=int, default=8800, help='Port to serve on') parser.add_argument('--proxy', '-P', help='Proxy connections via given ssh host') @@ -132,16 +133,20 @@ def main(): if args.proxy: proxymanager.force_proxy(args.proxy) + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: session = start_session( - args.crossbar, os.environ.get('LG_CROSSBAR_REALM', 'realm1'), {}, + args.coordinator, + loop=loop, ) except ConnectionRefusedError: - logger.fatal('Unable to connect to labgrid crossbar') + logger.fatal('Unable to connect to labgrid coordinator')
return server = uvicorn.Server(config=uvicorn.Config( - loop=session.loop, + loop=loop, host='0.0.0.0', port=args.port, app=app, @@ -153,7 +158,11 @@ def main(): if route.path not in reserved_routes: logger.info(f' - {route.path}') - session.loop.run_until_complete(server.serve()) + try: + loop.run_until_complete(server.serve()) + finally: + loop.run_until_complete(session.stop()) + loop.run_until_complete(session.close()) if __name__ == '__main__': From 5a49429b7a246292365d156ec95258f0d4b87365 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 16:46:15 +0200 Subject: [PATCH 29/38] contrib/sync-places: migrate to gRPC Signed-off-by: Bastian Krause --- contrib/sync-places.py | 52 ++++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 17 deletions(-) diff --git a/contrib/sync-places.py b/contrib/sync-places.py index d3d74f86d..94ea15734 100755 --- a/contrib/sync-places.py +++ b/contrib/sync-places.py @@ -15,8 +15,10 @@ # limitations under the License. 
import argparse +import asyncio from contextlib import contextmanager from labgrid.remote.client import start_session +from labgrid.remote.generated import labgrid_coordinator_pb2 from labgrid.util.proxy import proxymanager import os import sys @@ -51,14 +53,20 @@ async def do_sync(session, args): for name in remove_places: print(f"Removing place {name}") if not args.dry_run: - await session.call("org.labgrid.coordinator.del_place", name) + request = labgrid_coordinator_pb2.DeletePlaceRequest(name=name) + await session.stub.DeletePlace(request) + await session.sync_with_coordinator() + changed = True for name in config["places"]: if not name in seen_places: print(f"Adding place {name}") if not args.dry_run: - await session.call("org.labgrid.coordinator.add_place", name) + request = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + await session.stub.AddPlace(request) + await session.sync_with_coordinator() + changed = True for name in config["places"]: @@ -89,9 +97,10 @@ async def do_sync(session, args): else: print(f"Deleting match '{match}' for place {name}") if not args.dry_run: - await session.call( - "org.labgrid.coordinator.del_place_match", name, match, rename - ) + request = labgrid_coordinator_pb2.DeletePlaceMatchRequest(placename=name, pattern=match) + await session.stub.DeletePlaceMatch(request) + await session.sync_with_coordinator() + changed = True for m in matches: @@ -103,9 +112,9 @@ async def do_sync(session, args): print(f"Adding match '{match}' for place {name}") if not args.dry_run: - await session.call( - "org.labgrid.coordinator.add_place_match", name, match, rename - ) + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=name, pattern=match, rename=rename) + await session.stub.AddPlaceMatch(request) + await session.sync_with_coordinator() changed = True tags = config["places"][name].get("tags", {}).copy() @@ -131,9 +140,10 @@ async def do_sync(session, args): tags[k] = "" if not args.dry_run: - await session.call( - 
"org.labgrid.coordinator.set_place_tags", name, tags - ) + request = labgrid_coordinator_pb2.SetPlaceTagsRequest(placename=name, tags=tags) + await session.stub.SetPlaceTags(request) + await session.sync_with_coordinator() + changed = True async def do_dump(session, args): @@ -174,11 +184,11 @@ async def do_dump(session, args): formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( - "--crossbar", + "--coordinator", "-x", - metavar="URL", - default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), - help="Crossbar websocket URL (default: %(default)s)", + metavar="ADDRESS", + default=os.environ.get("LG_COORDINATOR", "127.0.0.1:20408"), + help="Coordinator address as HOST[:PORT] (default: %(default)s)", ) parser.add_argument("--proxy", "-P", help="Proxy connections via given ssh host") @@ -219,11 +229,19 @@ async def do_dump(session, args): if args.proxy: proxymanager.force_proxy(args.proxy) + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + session = start_session( - args.crossbar, os.environ.get("LG_CROSSBAR_REALM", "realm1"), {} + args.coordinator, + loop=loop, ) - return session.loop.run_until_complete(args.func(session, args)) + try: + return loop.run_until_complete(args.func(session, args)) + finally: + loop.run_until_complete(session.stop()) + loop.run_until_complete(session.close()) if __name__ == "__main__": From 13969df4fade306021028a71c3adf9e9abd1fccd Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 7 Aug 2024 15:26:38 +0200 Subject: [PATCH 30/38] examples: migrate to gRPC Signed-off-by: Bastian Krause --- examples/deditec-relais8/import-dedicontrol.yaml | 2 +- examples/networkmanager/nm.env | 2 +- examples/sysfsgpio/import-gpio.yaml | 2 +- examples/usbpower/README.rst | 6 ++---- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/examples/deditec-relais8/import-dedicontrol.yaml b/examples/deditec-relais8/import-dedicontrol.yaml index 0e8607962..9af4776e9 100644 --- 
a/examples/deditec-relais8/import-dedicontrol.yaml +++ b/examples/deditec-relais8/import-dedicontrol.yaml @@ -6,4 +6,4 @@ targets: drivers: DeditecRelaisDriver: {} options: - crossbar_url: 'ws://labgrid:20408/ws' + coordinator_address: 'labgrid:20408' diff --git a/examples/networkmanager/nm.env b/examples/networkmanager/nm.env index c96ef21da..be767f2fb 100644 --- a/examples/networkmanager/nm.env +++ b/examples/networkmanager/nm.env @@ -6,4 +6,4 @@ targets: drivers: NetworkInterfaceDriver: {} options: - crossbar_url: 'ws://labgrid/ws' + coordinator_address: 'labgrid:20408' diff --git a/examples/sysfsgpio/import-gpio.yaml b/examples/sysfsgpio/import-gpio.yaml index 76c9c285f..4ba7b223f 100644 --- a/examples/sysfsgpio/import-gpio.yaml +++ b/examples/sysfsgpio/import-gpio.yaml @@ -6,4 +6,4 @@ targets: drivers: GpioDigitalOutputDriver: {} options: - crossbar_url: 'ws://labgrid:20408/ws' + coordinator_address: 'labgrid:20408' diff --git a/examples/usbpower/README.rst b/examples/usbpower/README.rst index 552f933c8..f427d42d3 100644 --- a/examples/usbpower/README.rst +++ b/examples/usbpower/README.rst @@ -40,8 +40,6 @@ on port 3. Software Setup -------------- -The following expects that labgrid is installed in the -active virtualenv and crossbar is installed into a separate virtualenv. The ``uhubctl`` and ``usbsdmux`` tools need to be installed on the system. Library Example @@ -116,9 +114,9 @@ Remote Setup ------------ To access resources remotely, you first need to start the coordinator:: - $ crossbar-venv/bin/crossbar start --logformat none --config config-anonymous.yaml + $ labgrid-coordinator [...] - Coordinator ready. 
+ Coordinator ready Then, you need to start the exporter:: $ labgrid-exporter exports.yaml From c613f1e55e08e12da4ceff3d6b9e797b97a0c97f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 11:43:42 +0200 Subject: [PATCH 31/38] dockerfiles: migrate to gRPC Signed-off-by: Bastian Krause --- .gitignore | 4 +-- dockerfiles/Dockerfile | 27 +++++++------------ dockerfiles/README.rst | 20 +++++++------- .../places_example.yaml | 0 dockerfiles/staging/docker-compose.yml | 6 ++--- 5 files changed, 24 insertions(+), 33 deletions(-) rename dockerfiles/staging/{crossbar => coordinator}/places_example.yaml (100%) diff --git a/.gitignore b/.gitignore index cc74652b4..dad0e7c90 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,6 @@ /.pytest_cache/ /htmlcov/ /labgrid/_version.py -/dockerfiles/staging/crossbar/* -!/dockerfiles/staging/crossbar/places_example.yaml +/dockerfiles/staging/coordinator/* +!/dockerfiles/staging/coordinator/places_example.yaml /.idea diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index 1b749fe4f..15fd5eb1f 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -1,4 +1,5 @@ FROM debian:bookworm-slim AS labgrid-base +ARG VERSION LABEL maintainer="eha@deif.com" @@ -8,10 +9,12 @@ COPY ./ /opt/labgrid/ RUN set -e ;\ apt update -q=2 ;\ - apt install -q=2 --yes --no-install-recommends python3 python3-dev python3-pip python3-setuptools git build-essential libsnappy-dev ;\ + apt install -q=2 --yes --no-install-recommends python3 python3-dev python3-pip python3-setuptools git build-essential ;\ pip3 install --break-system-packages -U pip;\ apt clean ;\ - rm -rf /var/lib/apt/lists/* + rm -rf /var/lib/apt/lists/* ;\ + cd /opt/labgrid ;\ + SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . 
# # Client @@ -20,9 +23,7 @@ FROM labgrid-base AS labgrid-client ARG VERSION RUN set -e ;\ - cd /opt/labgrid ;\ pip3 install --break-system-packages yq ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends microcom openssh-client rsync jq qemu-system qemu-utils ;\ apt clean ;\ @@ -36,21 +37,13 @@ CMD ["/bin/bash"] FROM labgrid-base AS labgrid-coordinator ARG VERSION -ENV CROSSBAR_DIR=/opt/crossbar - -RUN set -e ;\ - cd /opt/labgrid ;\ - pip3 install --break-system-packages virtualenv ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . ;\ - virtualenv -p python3 crossbar-venv ;\ - crossbar-venv/bin/pip3 install --break-system-packages -r crossbar-requirements.txt ;\ - sed -i "s#^ executable: .*\$# executable: python3#" .crossbar/config-anonymous.yaml - -VOLUME /opt/crossbar +VOLUME /opt/coordinator EXPOSE 20408 -CMD ["/opt/labgrid/crossbar-venv/bin/crossbar", "start", "--config", "/opt/labgrid/.crossbar/config-anonymous.yaml"] +CMD ["/usr/local/bin/labgrid-coordinator"] # # Exporter # FROM labgrid-base AS labgrid-exporter ARG VERSION COPY dockerfiles/exporter/entrypoint.sh /entrypoint.sh RUN set -e ;\ - cd /opt/labgrid ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends ser2net ;\ apt clean ;\ diff --git a/dockerfiles/README.rst b/dockerfiles/README.rst index 8c2e31439..b0f8ff150 100644 --- a/dockerfiles/README.rst +++ b/dockerfiles/README.rst @@ -5,7 +5,7 @@ This folder contains Dockerfile's for building Docker images for the 3 different components of a Labgrid distributed infrastructure. - **labgrid-coordinator** An image for with crossbar which can be used to run + An image with the Labgrid coordinator which can be used to run a Labgrid coordinator instance.
- **labgrid-client** An image with the Labgrid client tools and pytest integration. @@ -64,18 +64,18 @@ No policy or configuration is done. labgrid-coordinator usage ~~~~~~~~~~~~~~~~~~~~~~~~~ -The labgrid-coordinator comes with a preconfigured Crossbar.io server. +The labgrid-coordinator image can be used to run a coordinator instance. -It listens to port 20408, +It listens on port 20408, so you probably want to publish that so you can talk to the coordinator. -State is written to ``/opt/crossbar``. +State is written to ``/opt/coordinator``. You might want to bind a volume to that -so you can restart the service without loosing state. +so you can restart the service without losing state. .. code-block:: bash - $ docker run -t -p 20408:20408 -v $HOME/crossbar:/opt/crossbar \ + $ docker run -t -p 20408:20408 -v $HOME/coordinator:/opt/coordinator \ docker.io/labgrid/coordinator @@ -85,18 +85,18 @@ labgrid-client usage The labgrid-client image can be used to run ``labgrid-client`` and ``pytest`` commands. For example listing available places registered at coordinator at -ws://192.168.1.42:20408/ws +192.168.1.42:20408 .. code-block:: bash - $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws docker.io/labgrid/client \ + $ docker run -e LG_COORDINATOR=192.168.1.42:20408 docker.io/labgrid/client \ labgrid-client places Or running all pytest/labgrid tests at current directory: .. code-block:: bash - $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws docker.io/labgrid/client \ + $ docker run -e LG_COORDINATOR=192.168.1.42:20408 docker.io/labgrid/client \ pytest @@ -113,7 +113,7 @@ Start it with something like: .. 
code-block:: bash - $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws \ + $ docker run -e LG_COORDINATOR=192.168.1.42:20408 \ -v $HOME/exporter-conf:/opt/conf \ docker.io/labgrid/exporter diff --git a/dockerfiles/staging/crossbar/places_example.yaml b/dockerfiles/staging/coordinator/places_example.yaml similarity index 100% rename from dockerfiles/staging/crossbar/places_example.yaml rename to dockerfiles/staging/coordinator/places_example.yaml diff --git a/dockerfiles/staging/docker-compose.yml b/dockerfiles/staging/docker-compose.yml index cb3547802..b78c648b6 100644 --- a/dockerfiles/staging/docker-compose.yml +++ b/dockerfiles/staging/docker-compose.yml @@ -2,11 +2,11 @@ services: coordinator: image: "${IMAGE_PREFIX:-docker.io/labgrid/}coordinator" volumes: - - "./crossbar:/home/root/crossbar" + - "./coordinator:/home/root/coordinator" tty: true network_mode: "host" - command: bash -c "cp /home/root/crossbar/places_example.yaml /opt/crossbar/places.yaml && - /opt/labgrid/crossbar-venv/bin/crossbar start --config /opt/labgrid/.crossbar/config-anonymous.yaml" + command: bash -c "cp /home/root/coordinator/places_example.yaml /opt/coordinator/places.yaml && + /usr/local/bin/labgrid-coordinator" client: image: "${IMAGE_PREFIX:-docker.io/labgrid/}client" volumes: From 81f5801cdbaebba8e78735a3639656e82e6f6875 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 8 Aug 2024 18:20:49 +0200 Subject: [PATCH 32/38] man: update for gRPC Signed-off-by: Bastian Krause Signed-off-by: Rouven Czerwinski --- debian/labgrid.manpages | 1 + man/Makefile | 4 +- man/labgrid-client.1 | 14 +++---- man/labgrid-client.rst | 17 +++------ man/labgrid-coordinator.1 | 70 +++++++++++++++++++++++++++++++++++ man/labgrid-coordinator.rst | 49 ++++++++++++++++++++++++ man/labgrid-device-config.5 | 12 ++---- man/labgrid-device-config.rst | 12 ++---- man/labgrid-exporter.1 | 14 +++---- man/labgrid-exporter.rst | 17 +++------ 10 files changed, 152 insertions(+), 58 deletions(-) create mode 
100644 man/labgrid-coordinator.1 create mode 100644 man/labgrid-coordinator.rst diff --git a/debian/labgrid.manpages b/debian/labgrid.manpages index eb6e10245..b72c77540 100644 --- a/debian/labgrid.manpages +++ b/debian/labgrid.manpages @@ -1,4 +1,5 @@ man/labgrid-client.1 +man/labgrid-coordinator.1 man/labgrid-exporter.1 man/labgrid-suggest.1 man/labgrid-device-config.5 diff --git a/man/Makefile b/man/Makefile index a31910b9f..4c1c1accc 100644 --- a/man/Makefile +++ b/man/Makefile @@ -1,5 +1,5 @@ -PAGES := labgrid-client.1 labgrid-exporter.1 labgrid-device-config.5 labgrid-pytest.7 labgrid-suggest.1 -COMPRESSED := labgrid-client.1.gz labgrid-exporter.1.gz labgrid-device-config.5.gz labgrid-pytest.7.gz labgrid-suggest.1.gz +PAGES := labgrid-client.1 labgrid-coordinator.1 labgrid-exporter.1 labgrid-device-config.5 labgrid-pytest.7 labgrid-suggest.1 +COMPRESSED := labgrid-client.1.gz labgrid-coordinator.1.gz labgrid-exporter.1.gz labgrid-device-config.5.gz labgrid-pytest.7.gz labgrid-suggest.1.gz %.1 %.5 %.7: %.rst rst2man.py $< >$@.tmp diff --git a/man/labgrid-client.1 b/man/labgrid-client.1 index c5772ae0b..52db032db 100644 --- a/man/labgrid-client.1 +++ b/man/labgrid-client.1 @@ -51,8 +51,8 @@ display command line help .BI \-p \ PLACE\fR,\fB \ \-\-place \ PLACE specify the place to operate on .TP -.BI \-x \ URL\fR,\fB \ \-\-crossbar \ URL -the crossbar url of the coordinator, defaults to \fBws://127.0.0.1:20408/ws\fP +.BI \-x \ ADDRESS\fR,\fB \ \-\-coordinator \ ADDRESS +coordinator \fBHOST[:PORT]\fP to connect to, defaults to \fB127.0.0.1:20408\fP .TP .BI \-c \ CONFIG\fR,\fB \ \-\-config \ CONFIG set the configuration file @@ -103,14 +103,10 @@ A desired state must be set using \fBLG_STATE\fP or \fB\-s\fP/\fB\-\-state\fP\&. .sp This variable can be used to specify the configuration file to use without using the \fB\-\-config\fP option, the \fB\-\-config\fP option overrides it. 
-.SS LG_CROSSBAR +.SS LG_COORDINATOR .sp -This variable can be used to set the default crossbar URL (instead of using the -\fB\-x\fP option). -.SS LG_CROSSBAR_REALM -.sp -This variable can be used to set the default crossbar realm to use instead of -\fBrealm1\fP\&. +This variable can be used to set the default coordinator in the format +\fBHOST[:PORT]\fP (instead of using the \fB\-x\fP option). .SS LG_PROXY .sp This variable can be used to specify a SSH proxy hostname which should be used diff --git a/man/labgrid-client.rst b/man/labgrid-client.rst index 27259bfed..43b76f663 100644 --- a/man/labgrid-client.rst +++ b/man/labgrid-client.rst @@ -38,8 +38,8 @@ OPTIONS display command line help -p PLACE, --place PLACE specify the place to operate on --x URL, --crossbar URL - the crossbar url of the coordinator, defaults to ``ws://127.0.0.1:20408/ws`` +-x ADDRESS, --coordinator ADDRESS + coordinator ``HOST[:PORT]`` to connect to, defaults to ``127.0.0.1:20408`` -c CONFIG, --config CONFIG set the configuration file -s STATE, --state STATE @@ -91,15 +91,10 @@ LG_ENV This variable can be used to specify the configuration file to use without using the ``--config`` option, the ``--config`` option overrides it. -LG_CROSSBAR -~~~~~~~~~~~ -This variable can be used to set the default crossbar URL (instead of using the -``-x`` option). - -LG_CROSSBAR_REALM -~~~~~~~~~~~~~~~~~ -This variable can be used to set the default crossbar realm to use instead of -``realm1``. +LG_COORDINATOR +~~~~~~~~~~~~~~ +This variable can be used to set the default coordinator in the format +``HOST[:PORT]`` (instead of using the ``-x`` option). LG_PROXY ~~~~~~~~ diff --git a/man/labgrid-coordinator.1 b/man/labgrid-coordinator.1 new file mode 100644 index 000000000..22673a065 --- /dev/null +++ b/man/labgrid-coordinator.1 @@ -0,0 +1,70 @@ +.\" Man page generated from reStructuredText. +. +. +.nr rst2man-indent-level 0 +. 
+.de1 rstReportMargin +\\$1 \\n[an-margin] +level \\n[rst2man-indent-level] +level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] +- +\\n[rst2man-indent0] +\\n[rst2man-indent1] +\\n[rst2man-indent2] +.. +.de1 INDENT +.\" .rstReportMargin pre: +. RS \\$1 +. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] +. nr rst2man-indent-level +1 +.\" .rstReportMargin post: +.. +.de UNINDENT +. RE +.\" indent \\n[an-margin] +.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] +.nr rst2man-indent-level -1 +.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] +.in \\n[rst2man-indent\\n[rst2man-indent-level]]u +.. +.TH "LABGRID-COORDINATOR" 1 "2024-08-06" "0.0.1" "embedded testing" +.SH NAME +labgrid-coordinator \- labgrid-coordinator managing labgrid resources and places +.SH SYNOPSIS +.sp +\fBlabgrid\-coordinator\fP \fB\-\-help\fP +.SH DESCRIPTION +.sp +Labgrid is a scalable infrastructure and test architecture for embedded (linux) +systems. +.sp +This is the man page for the coordinator. Clients and exporters connect to the +coordinator to publish resources, manage place configuration and handle mutual +exclusion. +.SH OPTIONS +.INDENT 0.0 +.TP +.B \-h\fP,\fB \-\-help +display command line help +.TP +.BI \-l \ ADDRESS\fR,\fB \ \-\-listen \ ADDRESS +make coordinator listen on host and port +.TP +.B \-d\fP,\fB \-\-debug +enable debug mode +.UNINDENT +.SH SEE ALSO +.sp +\fBlabgrid\-client\fP(1), \fBlabgrid\-exporter\fP(1) +.SH AUTHOR +Rouven Czerwinski + +Organization: Labgrid-Project +.SH COPYRIGHT +Copyright (C) 2016-2024 Pengutronix. This library is free software; +you can redistribute it and/or modify it under the terms of the GNU +Lesser General Public License as published by the Free Software +Foundation; either version 2.1 of the License, or (at your option) +any later version. +.\" Generated by docutils manpage writer. +. 
diff --git a/man/labgrid-coordinator.rst b/man/labgrid-coordinator.rst new file mode 100644 index 000000000..786059196 --- /dev/null +++ b/man/labgrid-coordinator.rst @@ -0,0 +1,49 @@ +===================== + labgrid-coordinator +===================== + +labgrid-coordinator managing labgrid resources and places +========================================================= + + +:Author: Rouven Czerwinski +:organization: Labgrid-Project +:Date: 2024-08-06 +:Copyright: Copyright (C) 2016-2024 Pengutronix. This library is free software; + you can redistribute it and/or modify it under the terms of the GNU + Lesser General Public License as published by the Free Software + Foundation; either version 2.1 of the License, or (at your option) + any later version. +:Version: 0.0.1 +:Manual section: 1 +:Manual group: embedded testing + + + +SYNOPSIS +-------- + +``labgrid-coordinator`` ``--help`` + +DESCRIPTION +----------- +Labgrid is a scalable infrastructure and test architecture for embedded (linux) +systems. + +This is the man page for the coordinator. Clients and exporters connect to the +coordinator to publish resources, manage place configuration and handle mutual +exclusion. + +OPTIONS +------- +-h, --help + display command line help +-l ADDRESS, --listen ADDRESS + make coordinator listen on host and port +-d, --debug + enable debug mode + +SEE ALSO +-------- + +``labgrid-client``\(1), ``labgrid-exporter``\(1) diff --git a/man/labgrid-device-config.5 b/man/labgrid-device-config.5 index 13943611c..f6de7a954 100644 --- a/man/labgrid-device-config.5 +++ b/man/labgrid-device-config.5 @@ -52,17 +52,13 @@ For a list of available resources and drivers refer to \fI\%https://labgrid.readthedocs.io/en/latest/configuration.html\fP\&. .SH OPTIONS .sp -The \fBoptions:\fP top key configures various options such as the crossbar_url. +The \fBoptions:\fP top key configures various options such as the coordinator_address. 
.SS OPTIONS KEYS .INDENT 0.0 .TP -.B \fBcrossbar_url\fP -takes as parameter the URL of the crossbar (coordinator) to connect to. -Defaults to \(aqws://127.0.0.1:20408\(aq. -.TP -.B \fBcrossbar_realm\fP -takes as parameter the realm of the crossbar (coordinator) to connect to. -Defaults to \(aqrealm1\(aq. +.B \fBcoordinator_address\fP +takes as parameter the coordinator \fBHOST[:PORT]\fP to connect to. +Defaults to \fB127.0.0.1:20408\fP\&. .UNINDENT .SH IMAGES .sp diff --git a/man/labgrid-device-config.rst b/man/labgrid-device-config.rst index 216a657bd..ba0156830 100644 --- a/man/labgrid-device-config.rst +++ b/man/labgrid-device-config.rst @@ -47,18 +47,14 @@ https://labgrid.readthedocs.io/en/latest/configuration.html. OPTIONS ------- -The ``options:`` top key configures various options such as the crossbar_url. +The ``options:`` top key configures various options such as the coordinator_address. OPTIONS KEYS ~~~~~~~~~~~~ -``crossbar_url`` - takes as parameter the URL of the crossbar (coordinator) to connect to. - Defaults to 'ws://127.0.0.1:20408'. - -``crossbar_realm`` - takes as parameter the realm of the crossbar (coordinator) to connect to. - Defaults to 'realm1'. +``coordinator_address`` + takes as parameter the coordinator ``HOST[:PORT]`` to connect to. + Defaults to ``127.0.0.1:20408``. .. _labgrid-device-config-images: diff --git a/man/labgrid-exporter.1 b/man/labgrid-exporter.1 index faf836daa..66d1d69e0 100644 --- a/man/labgrid-exporter.1 +++ b/man/labgrid-exporter.1 @@ -47,8 +47,8 @@ USB devices and various other controllers. .B \-h\fP,\fB \-\-help display command line help .TP -.B \-x\fP,\fB \-\-crossbar -the crossbar url of the coordinator +.B \-x\fP,\fB \-\-coordinator +coordinator \fBHOST[:PORT]\fP to connect to, defaults to \fB127.0.0.1:20408\fP .TP .B \-i\fP,\fB \-\-isolated enable isolated mode (always request SSH forwards) @@ -100,14 +100,10 @@ for more information. 
.SH ENVIRONMENT VARIABLES .sp The following environment variable can be used to configure labgrid\-exporter. -.SS LG_CROSSBAR +.SS LG_COORDINATOR .sp -This variable can be used to set the default crossbar URL (instead of using the -\fB\-x\fP option). -.SS LG_CROSSBAR_REALM -.sp -This variable can be used to set the default crossbar realm to use instead of -\fBrealm1\fP\&. +This variable can be used to set the default coordinator in the format +\fBHOST[:PORT]\fP (instead of using the \fB\-x\fP option). .SH EXAMPLES .sp Start the exporter with the configuration file \fImy\-config.yaml\fP: diff --git a/man/labgrid-exporter.rst b/man/labgrid-exporter.rst index f43754ca1..bf3ec50d0 100644 --- a/man/labgrid-exporter.rst +++ b/man/labgrid-exporter.rst @@ -38,8 +38,8 @@ OPTIONS ------- -h, --help display command line help --x, --crossbar - the crossbar url of the coordinator +-x, --coordinator + coordinator ``HOST[:PORT]`` to connect to, defaults to ``127.0.0.1:20408`` -i, --isolated enable isolated mode (always request SSH forwards) -n, --name @@ -92,15 +92,10 @@ ENVIRONMENT VARIABLES --------------------- The following environment variable can be used to configure labgrid-exporter. -LG_CROSSBAR -~~~~~~~~~~~ -This variable can be used to set the default crossbar URL (instead of using the -``-x`` option). - -LG_CROSSBAR_REALM -~~~~~~~~~~~~~~~~~ -This variable can be used to set the default crossbar realm to use instead of -``realm1``. +LG_COORDINATOR +~~~~~~~~~~~~~~ +This variable can be used to set the default coordinator in the format +``HOST[:PORT]`` (instead of using the ``-x`` option). 
EXAMPLES -------- From e0167aeebc8119a01673a5276ecfdaa402af9f3f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 8 Aug 2024 18:22:12 +0200 Subject: [PATCH 33/38] doc: update for gRPC Signed-off-by: Bastian Krause Signed-off-by: Rouven Czerwinski --- doc/RELEASE.rst | 6 +--- doc/development.rst | 2 +- doc/getting_started.rst | 63 ++++------------------------------------- doc/man.rst | 1 + doc/man/coordinator.rst | 2 ++ doc/overview.rst | 13 ++++----- doc/usage.rst | 8 +++--- 7 files changed, 20 insertions(+), 75 deletions(-) create mode 100644 doc/man/coordinator.rst diff --git a/doc/RELEASE.rst b/doc/RELEASE.rst index da1b2f41d..886746ba5 100644 --- a/doc/RELEASE.rst +++ b/doc/RELEASE.rst @@ -66,10 +66,6 @@ Test the upload by using pypi dev as a download source :: - virtualenv -p python3 labgrid-crossbar-release- - labgrid-crossbar-release-/bin/pip install --upgrade pip - labgrid-crossbar-release-/bin/pip install -r crossbar-requirements.txt - virtualenv -p python3 labgrid-release- source labgrid-release-/bin/activate pip install --upgrade pip setuptools wheel @@ -80,7 +76,7 @@ And optionally run the tests: :: pip install ".[dev]" - pytest tests --crossbar-venv labgrid-crossbar-release- + pytest tests 7. Upload to pypi ================= diff --git a/doc/development.rst b/doc/development.rst index 8b0351076..5a58abb52 100644 --- a/doc/development.rst +++ b/doc/development.rst @@ -23,7 +23,7 @@ Install required dependencies: .. code-block:: bash - sudo apt install python3-dev libow-dev libsnappy-dev + sudo apt install python3-dev libow-dev Install labgrid with development dependencies into the virtualenv in editable mode: diff --git a/doc/getting_started.rst b/doc/getting_started.rst index 7d1990bb5..c27d6b161 100644 --- a/doc/getting_started.rst +++ b/doc/getting_started.rst @@ -62,7 +62,7 @@ Test your installation by running: .. 
code-block:: bash labgrid-venv $ labgrid-client --help - usage: labgrid-client [-h] [-x URL] [-c CONFIG] [-p PLACE] [-d] COMMAND ... + usage: labgrid-client [-h] [-x ADDRESS] [-c CONFIG] [-p PLACE] [-d] COMMAND ... ... If the help for labgrid-client does not show up, open an `Issue @@ -170,58 +170,11 @@ exporter, and learn how to access the exporter via the client. Coordinator ~~~~~~~~~~~ -To start the coordinator, we will download the labgrid repository, create an -extra virtualenv and install the dependencies: +We can simply start the coordinator: .. code-block:: bash - $ sudo apt install libsnappy-dev - $ git clone https://github.com/labgrid-project/labgrid - $ cd labgrid - $ virtualenv -p python3 crossbar-venv - $ crossbar-venv/bin/pip install --upgrade pip - $ crossbar-venv/bin/pip install -r crossbar-requirements.txt - $ virtualenv -p python3 labgrid-venv - $ source labgrid-venv/bin/activate - labgrid-venv $ pip install --upgrade pip - labgrid-venv $ pip install . - -All necessary dependencies should be installed now. - -Copy and customize the crossbar config file ``.crossbar/config-anonymous.yaml`` -for your use case: - -.. code-block:: bash - - labgrid-venv $ cp .crossbar/config-anonymous.yaml .crossbar/my-config.yaml - -.. note:: crossbar is a network messaging framework for building distributed - applications, which labgrid plugs into. - -The path to the Python interpreter in the labgrid-venv needs to be configured -in crossbar's config, either manually or with the labgrid-venv being active -via: - -.. code-block:: bash - - labgrid-venv $ sed -i "s#^ executable: .*\$# executable: ${VIRTUAL_ENV}/bin/python3#" .crossbar/my-config.yaml - -.. note:: For long running deployments a different ``workdir`` and port may be - used. - The crossbar config should reside in a ``.crossbar`` directory in the - ``workdir`` in this case. - For an example systemd service file, see - :ref:`remote-getting-started-systemd-files`. 
- -Now we can finally start the coordinator inside the repository: - -.. code-block:: bash - - $ crossbar-venv/bin/crossbar start --config my-config.yaml - -.. note:: If --config is specified as a relative path, the config is expected - in a .crossbar subdirectory (as is the case in the labgrid - repository). + labgrid-venv $ labgrid-coordinator Exporter ~~~~~~~~ @@ -375,25 +328,19 @@ Follow these instructions to install the systemd files on your machine(s): installation paths of your distribution. #. Adapt the ``ExecStart`` paths of the service files to the respective Python virtual environments of the coordinator and exporter. -#. Create the coordinator configuration file referenced in the ``ExecStart`` - option of the :file:`labgrid-coordinator.service` file by using - :file:`.crossbar/config-anonymous.yaml` as a starting point. You most likely - want to make sure that the ``workdir`` option matches the path given via the - ``--cbdir`` option in the service file; see - :ref:`remote-getting-started-coordinator` for further information. #. Adjust the ``SupplementaryGroups`` option in the :file:`labgrid-exporter.service` file to your distribution so that the exporter gains read and write access on TTY devices (for ``ser2net``); most often, these groups are called ``dialout``, ``plugdev`` or ``tty``. Depending on your udev configuration, you may need multiple groups. -#. Set the coordinator URL the exporter should connect to by overriding the +#. Set the coordinator address the exporter should connect to by overriding the exporter service file; i.e. execute ``systemctl edit labgrid-exporter.service`` and add the following snippet: .. code-block:: [Service] - Environment="LG_CROSSBAR=ws://<hostname>:<port>/ws" + Environment="LG_COORDINATOR=<hostname>[:<port>]" #.
Create the ``labgrid`` user and group: diff --git a/doc/man.rst b/doc/man.rst index 0523f301a..e6d6f6360 100644 --- a/doc/man.rst +++ b/doc/man.rst @@ -5,3 +5,4 @@ Manual Pages man/client man/device-config man/exporter + man/coordinator diff --git a/doc/man/coordinator.rst b/doc/man/coordinator.rst new file mode 100644 index 000000000..c1c7afbd7 --- /dev/null +++ b/doc/man/coordinator.rst @@ -0,0 +1,2 @@ +.. _labgrid-coordinator: +.. include:: ../../man/labgrid-coordinator.rst diff --git a/doc/overview.rst b/doc/overview.rst index 8152406bf..8d69d85f8 100644 --- a/doc/overview.rst +++ b/doc/overview.rst @@ -210,7 +210,7 @@ labgrid contains components for accessing resources which are not directly accessible on the local machine. The main parts of this are: -labgrid-coordinator (crossbar component) +labgrid-coordinator Clients and exporters connect to the coordinator to publish resources, manage place configuration and handle mutual exclusion. @@ -227,9 +227,8 @@ RemotePlace (managed resource) When used in a `Target`, the RemotePlace expands to the resources configured for the named places. -These components communicate over the `WAMP <https://wamp-proto.org/>`_ -implementation `Autobahn <https://crossbar.io/autobahn/>`_ and the `Crossbar -<https://crossbar.io/>`_ WAMP router. +These components communicate over `gRPC <https://grpc.io/>`_. The coordinator +acts as a gRPC server to which client and exporter connect. The following sections describe the responsibilities of each component. See :ref:`remote-usage` for usage information. @@ -239,8 +238,8 @@ The following sections describe the responsibilities of each component. See Coordinator ~~~~~~~~~~~ -The `Coordinator` is implemented as a Crossbar component and is started by the -router. +The `Coordinator` is implemented as a gRPC server and is started as a separate +process. It provides separate RPC methods for the exporters and clients.
The coordinator keeps a list of all resources for clients and @@ -387,7 +386,7 @@ variable needs to be set to the remote host which should tunnel the connection to the coordinator. The client then forwards all network traffic - client-to-coordinator and client-to-exporter - through SSH, via their respective proxies. This means that with :code:`LG_PROXY` and -:code:`LG_CROSSBAR` labgrid can be used fully remotely with only a SSH +:code:`LG_COORDINATOR` labgrid can be used fully remotely with only a SSH connection as a requirement. .. note:: diff --git a/doc/usage.rst b/doc/usage.rst index 4caff26bc..9d423d50b 100644 --- a/doc/usage.rst +++ b/doc/usage.rst @@ -437,10 +437,10 @@ Other labgrid-related pytest plugin options are: Specify a labgrid environment config file. This is equivalent to labgrid-client's ``-c``/``--config``. -``--lg-coordinator=CROSSBAR_URL`` - Specify labgrid coordinator websocket URL. - Defaults to ``ws://127.0.0.1:20408/ws``. - This is equivalent to labgrid-client's ``-x``/``--crossbar``. +``--lg-coordinator=COORDINATOR_ADDRESS`` + Specify labgrid coordinator gRPC address as ``HOST[:PORT]``. + Defaults to ``127.0.0.1:20408``. + This is equivalent to labgrid-client's ``-x``/``--coordinator``. ``--lg-log=[path to logfiles]`` Path to store console log file. From 045634b25f1e489b9513f9d0ce64edaee5250e7b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 11:53:08 +0200 Subject: [PATCH 34/38] github/workflows/reusable-unit-tests: skip pylint for python3.8 pylint finds false positives for Python 3.8: labgrid/remote/coordinator.py:188:21: E1136: Value 'dict' is unsubscriptable (unsubscriptable-object) labgrid/remote/coordinator.py:194:24: E1136: Value 'dict' is unsubscriptable (unsubscriptable-object) labgrid/remote/coordinator.py:195:22: E1136: Value 'dict' is unsubscriptable (unsubscriptable-object) Since Python 3.8 reaches EOL in October 2024 anyways, let's just skip pylint for this version until then. 
Signed-off-by: Bastian Krause --- .github/workflows/reusable-unit-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 15be56078..852cd1a26 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -47,6 +47,7 @@ jobs: run: | pip install -e ".[dev]" - name: Lint with pylint + if: inputs.python-version != '3.8' run: | pylint --list-msgs-enabled pylint labgrid From bf4379c9f2e01ba844a70d4be60b972110f770fa Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 7 Aug 2024 14:11:38 +0200 Subject: [PATCH 35/38] pyproject: advertise python3.12 compatibility The latest crossbar release was not compatible with python3.12. Now that the crossbar dependency is gone, we can finally advertise python3.12 compatibility. Closes #1260 Signed-off-by: Bastian Krause --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 590fcb7c7..b06d84ea4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)", ] dependencies = [ @@ -226,7 +227,7 @@ signature-mutators = ["labgrid.step.step"] [tool.tox] legacy_tox_ini = """ [tox] -envlist = py38, py39, py310, py311 +envlist = py38, py39, py310, py311, py312 isolated_build = true [testenv] From 0ea9fe597db806f77f9c8b7dc1b9f46a9d1fe034 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 11:38:34 +0200 Subject: [PATCH 36/38] debian: install labgrid-coordinator Signed-off-by: Bastian Krause --- debian/labgrid-coordinator | 3 +++ debian/labgrid.install | 1 + 2 files changed, 4 insertions(+) create mode 100755 debian/labgrid-coordinator diff --git 
a/debian/labgrid-coordinator b/debian/labgrid-coordinator new file mode 100755 index 000000000..54a34440b --- /dev/null +++ b/debian/labgrid-coordinator @@ -0,0 +1,3 @@ +#!/bin/sh + +exec /opt/venvs/labgrid/bin/labgrid-coordinator "$@" diff --git a/debian/labgrid.install b/debian/labgrid.install index cfea6dd96..2a1d9725d 100755 --- a/debian/labgrid.install +++ b/debian/labgrid.install @@ -1,6 +1,7 @@ #!/usr/bin/dh-exec debian/labgrid.yaml /etc debian/labgrid-client /usr/bin +debian/labgrid-coordinator /usr/bin debian/labgrid-exporter /usr/bin debian/labgrid-pytest /usr/bin debian/labgrid-suggest /usr/bin From 1a406bcb95de8e21f935dafc0993c502d092ab47 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 11:39:10 +0200 Subject: [PATCH 37/38] debian: numpy is no longer an indirect dependency With the autobahn/crossbar dependencies removed, numpy is no longer an indirect dependency of labgrid. Signed-off-by: Bastian Krause --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 7c1532c6c..cd09b0c5d 100755 --- a/debian/rules +++ b/debian/rules @@ -13,4 +13,4 @@ override_dh_virtualenv: --upgrade-pip \ --extras deb \ --extra-pip-arg='--no-binary' \ - --extra-pip-arg='cffi,numpy' + --extra-pip-arg='cffi' From ab8f79a64e0266fcac79616917e843b1e1068ef1 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 11:34:10 +0200 Subject: [PATCH 38/38] CHANGES: add gRPC migration to new 24.1 release Signed-off-by: Bastian Krause --- CHANGES.rst | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index d185b909c..b2c4ce84c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,54 @@ +Release 24.1 (Unreleased) +------------------------- +As announced +`before `_, +this is the first release using gRPC instead of crossbar/autobahn for +communication between client/exporter and coordinator. 
+ +Crossbar/autobahn are unfortunately not very well maintained anymore. The +crossbar component was moved to its own virtualenv to cope with the high number +of dependencies leading to conflicts. Support for Python 3.13 is still not +available in a crossbar release on PyPI. + +That's why labgrid moves to gRPC with this release. gRPC is a well maintained +RPC framework with a lot of users. As a side effect, the message transfer is +more performant and the import times are shorter. + +New Features in 24.1 +~~~~~~~~~~~~~~~~~~~~ +- All components can be installed into the same virtualenv again. + +Bug fixes in 24.1 +~~~~~~~~~~~~~~~~~ + +FIXME + +Breaking changes in 24.1 +~~~~~~~~~~~~~~~~~~~~~~~~ +Maintaining support for both crossbar/autobahn as well as gRPC in labgrid would +be a lot of effort due to the different architectures of those frameworks. +Therefore, a hard migration to gRPC is deemed the lesser issue. + +Due to the migration, 24.1 includes the following breaking changes: + +- The labgrid environment config option ``crossbar_url`` was renamed to + ``coordinator_address``. The environment variable ``LG_CROSSBAR`` was renamed + to ``LG_COORDINATOR``. +- The labgrid environment config option ``crossbar_realm`` is now obsolete as + well as the environment variable ``LG_CROSSBAR_REALM``. +- The coordinator is available as ``labgrid-coordinator`` (instead of + ``crossbar start``). No additional configuration file is required. +- The systemd services in ``contrib/systemd/`` were updated. + +Other breaking changes include: + +FIXME + +Known issues in 24.1 +~~~~~~~~~~~~~~~~~~~~ + +FIXME + Release 24.0 (Released Aug 12, 2024) ------------------------------------