From b17efd9796ffb90a3a3eb9084f73841c265531d8 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 20 Jun 2024 12:36:18 +0200 Subject: [PATCH] replace autobahn&crossbar by gRPC See README.grpc for more information. Co-developed-by: Rouven Czerwinski Co-developed-by: Bastian Krause Signed-off-by: Jan Luebbe --- .github/workflows/reusable-unit-tests.yml | 6 +- README.grpc | 26 + crossbar-requirements.txt | 2 - doc/conf.py | 11 - labgrid/remote/client.py | 592 +++++++----- labgrid/remote/common.py | 244 ++++- labgrid/remote/coordinator.py | 883 ++++++++++-------- labgrid/remote/exporter.py | 265 +++--- labgrid/remote/generated/generate-proto.sh | 4 + .../generated/labgrid_coordinator_pb2.py | 158 ++++ .../generated/labgrid_coordinator_pb2.pyi | 448 +++++++++ .../generated/labgrid_coordinator_pb2_grpc.py | 833 +++++++++++++++++ .../remote/proto/labgrid-coordinator.proto | 297 ++++++ labgrid/resource/remote.py | 7 +- labgrid/util/proxy.py | 9 + pyproject.toml | 13 +- tests/conftest.py | 56 +- tests/{test_crossbar.py => test_client.py} | 44 +- tests/test_coordinator.py | 172 ++++ tests/test_pb2.py | 152 +++ tests/test_remote.py | 2 - 21 files changed, 3347 insertions(+), 877 deletions(-) create mode 100644 README.grpc delete mode 100644 crossbar-requirements.txt create mode 100755 labgrid/remote/generated/generate-proto.sh create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2.py create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2.pyi create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py create mode 100644 labgrid/remote/proto/labgrid-coordinator.proto rename tests/{test_crossbar.py => test_client.py} (87%) create mode 100644 tests/test_coordinator.py create mode 100644 tests/test_pb2.py diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 16a26f0c6..7cf0d1613 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ 
-46,17 +46,13 @@ jobs: - name: Install labgrid run: | pip install -e ".[dev]" - - name: Install crossbar in virtualenv - run: | - virtualenv -p python3 crossbar-venv - crossbar-venv/bin/pip install -r crossbar-requirements.txt - name: Lint with pylint run: | pylint --list-msgs-enabled pylint labgrid - name: Test with pytest run: | - TERM=xterm pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner --crossbar-venv crossbar-venv -k "not test_docker_with_daemon" + TERM=xterm pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" - name: Build documentation run: | make -C doc clean diff --git a/README.grpc b/README.grpc new file mode 100644 index 000000000..0df7db1c1 --- /dev/null +++ b/README.grpc @@ -0,0 +1,26 @@ +Working +------- + +- communication from exporter via coordinator to clients +- labgrid-client create/delete, lock/unlock, aliases, comments, tags +- labgrid-client monitor +- RemotePlace +- coordinator state persistance (places.yaml, resources.yaml) +- resource cleanup on exporter disconnect + +Known Issues +------------ + +- reservations do not work + +Next Steps +---------- + +[ ] add more tests +[ ] update documentation +[ ] document migration steps +[ ] update containers +[ ] implement reservation handling +[x] fix exporter shutdown on CTRL-C +[x] fix exporter restart handling +[x] fix client shutdown on CTRL-C diff --git a/crossbar-requirements.txt b/crossbar-requirements.txt deleted file mode 100644 index d361d83d9..000000000 --- a/crossbar-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -crossbar==21.3.1 -autobahn<=22.4.1 diff --git a/doc/conf.py b/doc/conf.py index 139f530f0..cfef4259b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -182,17 +182,6 @@ 'special-members': True, } autodoc_mock_imports = ['onewire', - 'txaio', - 'autobahn', - 'autobahn.asyncio', - 'autobahn.asyncio.wamp', - 'autobahn.asyncio.websocket', - 'autobahn.wamp', - 
'autobahn.wamp.types', - 'autobahn.twisted', - 'autobahn.twisted.wamp', - 'autobahn.wamp.exception', - 'twisted.internet.defer', 'gi', 'gi.repository',] diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 63c0cf859..4cd2e28fc 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -14,16 +14,19 @@ import sys import shlex import json +import itertools from textwrap import indent from socket import gethostname from getpass import getuser from collections import defaultdict, OrderedDict from datetime import datetime from pprint import pformat -import txaio -txaio.use_asyncio() -from autobahn.asyncio.wamp import ApplicationSession +import attr +import grpc + +import labgrid.remote.generated.labgrid_coordinator_pb2_grpc as labgrid_coordinator_pb2_grpc +import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 from .common import ( ResourceEntry, @@ -33,21 +36,17 @@ ReservationState, TAG_KEY, TAG_VAL, - enable_tcp_nodelay, - monkey_patch_max_msg_payload_size_ws_option, + queue_as_aiter, ) from .. 
import Environment, Target, target_factory from ..exceptions import NoDriverFoundError, NoResourceFoundError, InvalidConfigError from ..resource.remote import RemotePlaceManager, RemotePlace -from ..util import diff_dict, flat_dict, filter_dict, dump, atomic_replace, labgrid_version, Timeout +from ..util import diff_dict, flat_dict, dump, atomic_replace, labgrid_version, Timeout from ..util.proxy import proxymanager from ..util.helper import processwrapper from ..driver import Mode, ExecutionError from ..logging import basicConfig, StepLogger -txaio.config.loop = asyncio.get_event_loop() # pylint: disable=no-member -monkey_patch_max_msg_payload_size_ws_option() - class Error(Exception): pass @@ -65,57 +64,139 @@ class InteractiveCommandError(Error): pass -class ClientSession(ApplicationSession): +@attr.s(eq=False) +class ClientSession: """The ClientSession encapsulates all the actions a Client can Invoke on the coordinator.""" + url = attr.ib(validator=attr.validators.instance_of(str)) + loop = attr.ib(validator=attr.validators.instance_of(asyncio.BaseEventLoop)) + env = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Environment))) + role = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) + prog = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) + args = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(argparse.Namespace))) + monitor = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + def gethostname(self): return os.environ.get("LG_HOSTNAME", gethostname()) def getuser(self): return os.environ.get("LG_USERNAME", getuser()) - def onConnect(self): + def __attrs_post_init__(self): """Actions which are executed if a connection is successfully opened.""" - self.loop = self.config.extra["loop"] - self.connected = self.config.extra["connected"] - self.args = self.config.extra.get("args") - self.env = 
self.config.extra.get("env", None) - self.role = self.config.extra.get("role", None) - self.prog = self.config.extra.get("prog", os.path.basename(sys.argv[0])) - self.monitor = self.config.extra.get("monitor", False) - enable_tcp_nodelay(self) - self.join( - self.config.realm, - authmethods=["anonymous", "ticket"], - authid=f"client/{self.gethostname()}/{self.getuser()}", - authextra={"authid": f"client/{self.gethostname()}/{self.getuser()}"}, - ) + self.stopping = asyncio.Event() - def onChallenge(self, challenge): - import warnings + self.channel = grpc.aio.insecure_channel(self.url) + self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) - warnings.warn("Ticket authentication is deprecated. Please update your coordinator.", DeprecationWarning) - logging.warning("Ticket authentication is deprecated. Please update your coordinator.") - return "dummy-ticket" + self.out_queue = asyncio.Queue() + self.stream_call = None + self.pump_task = None + self.sync_id = itertools.count(start=1) + self.sync_events = {} - async def onJoin(self, details): - # FIXME race condition? 
- resources = await self.call("org.labgrid.coordinator.get_resources") + async def start(self): self.resources = {} - for exporter, groups in resources.items(): - for group_name, group in sorted(groups.items()): - for resource_name, resource in sorted(group.items()): - await self.on_resource_changed(exporter, group_name, resource_name, resource) - - places = await self.call("org.labgrid.coordinator.get_places") self.places = {} - for placename, config in places.items(): - await self.on_place_changed(placename, config) - await self.subscribe(self.on_resource_changed, "org.labgrid.coordinator.resource_changed") - await self.subscribe(self.on_place_changed, "org.labgrid.coordinator.place_changed") - await self.connected(self) + self.pump_task = self.loop.create_task(self.message_pump()) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.startup.version = labgrid_version() + msg.startup.name = f"{self.gethostname()}/{self.getuser()}" + self.out_queue.put_nowait(msg) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.subscribe.all_places = True + self.out_queue.put_nowait(msg) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.subscribe.all_resources = True + self.out_queue.put_nowait(msg) + await self.sync_with_coordinator() + if self.stopping.is_set(): + raise ServerError("Could not connect to coordinator") + + async def stop(self): + self.out_queue.put_nowait(None) # let the sender side exit gracefully + if self.stream_call: + self.stream_call.cancel() + try: + await self.pump_task + except asyncio.CancelledError: + pass + self.cancel_pending_syncs() + + async def sync_with_coordinator(self): + identifier = next(self.sync_id) + event = self.sync_events[identifier] = asyncio.Event() + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.sync.id = identifier + logging.info("sending sync %s", identifier) + self.out_queue.put_nowait(msg) + await event.wait() + if self.stopping.is_set(): + logging.info("sync %s failed", identifier) + else: + 
logging.info("received sync %s", identifier) + return not self.stopping.is_set() + + def cancel_pending_syncs(self): + assert self.stopping.is_set() # only call when something has gone wrong + while True: + try: + identifier, event = self.sync_events.popitem() + logging.debug("cancelling %s %s", identifier, event) + event.set() + except KeyError: + break + + async def message_pump(self): + got_message = False + try: + self.stream_call = call = self.stub.ClientStream(queue_as_aiter(self.out_queue)) + async for out_msg in call: + out_msg: labgrid_coordinator_pb2.ClientOutMessage + got_message = True + logging.debug("out_msg from coordinator: %s", out_msg) + for update in out_msg.updates: + update_kind = update.WhichOneof("kind") + if update_kind == "resource": + resource: labgrid_coordinator_pb2.Resource = update.resource + await self.on_resource_changed( + resource.path.exporter_name, + resource.path.group_name, + resource.path.resource_name, + ResourceEntry.data_from_pb2(resource), + ) + elif update_kind == "del_resource": + resource_path: labgrid_coordinator_pb2.Resource.Path = update.del_resource + await self.on_resource_changed( + resource_path.exporter_name, resource_path.group_name, resource_path.resource_name, {} + ) + elif update_kind == "place": + place = update.place + await self.on_place_changed(place) + elif update_kind == "del_place": + place_name = update.del_place + await self.on_place_deleted(place_name) + else: + logging.warning("unknown update from coordinator! 
%s", update_kind) + if out_msg.HasField("sync"): + event = self.sync_events.pop(out_msg.sync.id) + event.set() + except grpc.aio.AioRpcError as e: + if e.code() == grpc.StatusCode.UNAVAILABLE: + if got_message: + logging.error("coordinator became unavailable: %s", e.details()) + else: + logging.error("coordinator is unavailable: %s", e.details()) + else: + logging.exception("unexpected grpc error in coordinator message pump task") + except Exception: + logging.exception("error in coordinator message pump task") + finally: + self.stopping.set() + self.out_queue.put_nowait(None) # let the sender side exit gracefully + self.cancel_pending_syncs() async def on_resource_changed(self, exporter, group_name, resource_name, resource): group = self.resources.setdefault(exporter, {}).setdefault(group_name, {}) @@ -128,44 +209,40 @@ async def on_resource_changed(self, exporter, group_name, resource_name, resourc old = group[resource_name].data group[resource_name].data = resource if self.monitor: - if resource and not old: + if "cls" in resource and not old: print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} created: {resource}") - elif resource and old: + elif "cls" in resource and old: print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} changed:") for k, v_old, v_new in diff_dict(flat_dict(old), flat_dict(resource)): print(f" {k}: {v_old} -> {v_new}") else: print(f"Resource {exporter}/{group_name}/???/{resource_name} deleted") - async def on_place_changed(self, name, config): - if not config: - del self.places[name] - if self.monitor: - print(f"Place {name} deleted") - return - config = config.copy() - config["name"] = name - config["matches"] = [ResourceMatch(**match) for match in config["matches"]] - config = filter_dict(config, Place, warn=True) + async def on_place_changed(self, place_pb2: labgrid_coordinator_pb2.Place): + name = place_pb2.name + if name not in self.places: - place = Place(**config) - self.places[name] = 
place + self.places[name] = Place.from_pb2(place_pb2) if self.monitor: - print(f"Place {name} created: {place}") + print(f"Place {name} created: {place_pb2}") else: place = self.places[name] old = flat_dict(place.asdict()) - place.update(config) + place.update_from_pb2(place_pb2) new = flat_dict(place.asdict()) if self.monitor: print(f"Place {name} changed:") for k, v_old, v_new in diff_dict(old, new): print(f" {k}: {v_old} -> {v_new}") + async def on_place_deleted(self, name: str): + del self.places[name] + if self.monitor: + print(f"Place {name} deleted") + async def do_monitor(self): self.monitor = True - while True: - await asyncio.sleep(3600.0) + await self.stopping.wait() async def complete(self): if self.args.type == "resources": @@ -410,61 +487,62 @@ async def add_place(self): name = self.args.place if not name: raise UserError("missing place name. Set with -p or via env var $PLACE") - if name in self.places: - raise UserError(f"{name} already exists") - res = await self.call("org.labgrid.coordinator.add_place", name) - if not res: - raise ServerError(f"failed to add place {name}") - return res + + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + try: + await self.stub.AddPlace(place) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_place(self): """Delete a place from the coordinator""" - pattern = self.args.place - if pattern not in self.places: - raise UserError("deletes require an exact place name") - place = self.places[pattern] - if place.acquired: - raise UserError(f"place {place.name} is not idle (acquired by {place.acquired})") - name = place.name - if not name: - raise UserError("missing place name. 
Set with -p or via env var $PLACE") - if name not in self.places: - raise UserError(f"{name} does not exist") - res = await self.call("org.labgrid.coordinator.del_place", name) - if not res: - raise ServerError(f"failed to delete place {name}") - return res + name = self.args.place + place = labgrid_coordinator_pb2.DeletePlaceRequest(name=name) + try: + await self.stub.DeletePlace(place) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_alias(self): """Add an alias for a place on the coordinator""" place = self.get_idle_place() alias = self.args.alias - if alias in place.aliases: - raise UserError(f"place {place.name} already has alias {alias}") - res = await self.call("org.labgrid.coordinator.add_place_alias", place.name, alias) - if not res: - raise ServerError(f"failed to add alias {alias} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.AddPlaceAliasRequest(placename=place.name, alias=alias) + + try: + await self.stub.AddPlaceAlias(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_alias(self): """Delete an alias for a place from the coordinator""" place = self.get_idle_place() alias = self.args.alias - if alias not in place.aliases: - raise UserError(f"place {place.name} has no alias {alias}") - res = await self.call("org.labgrid.coordinator.del_place_alias", place.name, alias) - if not res: - raise ServerError(f"failed to delete alias {alias} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.DeletePlaceAliasRequest(placename=place.name, alias=alias) + + try: + await self.stub.DeletePlaceAlias(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def set_comment(self): """Set the comment on a place""" place = self.get_place() comment = " ".join(self.args.comment) - res = await 
self.call("org.labgrid.coordinator.set_place_comment", place.name, comment) - if not res: - raise ServerError(f"failed to set comment {comment} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.SetPlaceCommentRequest(placename=place.name, comment=comment) + + try: + await self.stub.SetPlaceComment(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def set_tags(self): """Set the tags on a place""" @@ -480,40 +558,60 @@ async def set_tags(self): if not TAG_VAL.match(v): raise UserError(f"tag value '{v}' needs to match the rexex '{TAG_VAL.pattern}'") tags[k] = v - res = await self.call("org.labgrid.coordinator.set_place_tags", place.name, tags) - if not res: - raise ServerError(f"failed to set tags {' '.join(self.args.tags)} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.SetPlaceTagsRequest(placename=place.name, tags=tags) + + try: + await self.stub.SetPlaceTags(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_match(self): """Add a match for a place, making fuzzy matching available to the client""" place = self.get_idle_place() if place.acquired: - raise UserError(f"can not change acquired place {place.name}") + raise UserError(f"can not change acquired place {place.name}") # FIXME: done in coordinator? for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: - raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") + raise UserError( + f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')" # FIXME: done in coordinator? + ) if place.hasmatch(pattern.split("/")): - print(f"pattern '{pattern}' exists, skipping", file=sys.stderr) + print(f"pattern '{pattern}' exists, skipping", file=sys.stderr) # FIXME: done in coordinator? 
continue - res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern) - if not res: - raise ServerError(f"failed to add match {pattern} for place {place.name}") + + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern) + + try: + await self.stub.AddPlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_match(self): """Delete a match for a place""" place = self.get_idle_place() if place.acquired: - raise UserError(f"can not change acquired place {place.name}") + raise UserError(f"can not change acquired place {place.name}") # FIXME: done in coordinator? for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: - raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") + raise UserError( + f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')" # FIXME: done in coordinator? + ) if not place.hasmatch(pattern.split("/")): - print(f"pattern '{pattern}' not found, skipping", file=sys.stderr) - res = await self.call("org.labgrid.coordinator.del_place_match", place.name, pattern) - if not res: - raise ServerError(f"failed to delete match {pattern} for place {place.name}") + print(f"pattern '{pattern}' not found, skipping", file=sys.stderr) # FIXME: done in coordinator? + + request = labgrid_coordinator_pb2.DeletePlaceMatchRequest( + placename=place.name, pattern=pattern + ) # FIXME: rename? + + try: + await self.stub.DeletePlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_named_match(self): """Add a named match for a place. 
@@ -521,20 +619,29 @@ async def add_named_match(self): Fuzzy matching is not allowed to avoid accidental names conflicts.""" place = self.get_idle_place() if place.acquired: - raise UserError(f"can not change acquired place {place.name}") + raise UserError(f"can not change acquired place {place.name}") # FIXME: done in coordinator? pattern = self.args.pattern name = self.args.name if not 2 <= pattern.count("/") <= 3: - raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") + raise UserError( + f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')" + ) # FIXME: done in coordinator? if place.hasmatch(pattern.split("/")): - raise UserError(f"pattern '{pattern}' exists") + raise UserError(f"pattern '{pattern}' exists") # FIXME: done in coordinator? if "*" in pattern: - raise UserError(f"invalid pattern '{pattern}' ('*' not allowed for named matches)") + raise UserError( + f"invalid pattern '{pattern}' ('*' not allowed for named matches)" + ) # FIXME: done in coordinator? if not name: - raise UserError(f"invalid name '{name}'") - res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern, name) - if not res: - raise ServerError(f"failed to add match {pattern} for place {place.name}") + raise UserError(f"invalid name '{name}'") # FIXME: done in coordinator? 
+ + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern, rename=name) + + try: + await self.stub.AddPlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) def check_matches(self, place): resources = [] @@ -552,80 +659,90 @@ async def acquire(self): """Acquire a place, marking it unavailable for other clients""" place = self.get_place() if place.acquired: - raise UserError(f"place {place.name} is already acquired by {place.acquired}") + raise UserError( + f"place {place.name} is already acquired by {place.acquired}" + ) # FIXME: done in coordinator? if not self.args.allow_unmatched: self.check_matches(place) - res = await self.call("org.labgrid.coordinator.acquire_place", place.name) + request = labgrid_coordinator_pb2.AcquirePlaceRequest(placename=place.name) - if res: + try: + await self.stub.AcquirePlace(request) + await self.sync_with_coordinator() print(f"acquired place {place.name}") - return - - # check potential failure causes - for exporter, groups in sorted(self.resources.items()): - for group_name, group in sorted(groups.items()): - for resource_name, resource in sorted(group.items()): - resource_path = (exporter, group_name, resource.cls, resource_name) - if resource.acquired is None: - continue - match = place.getmatch(resource_path) - if match is None: - continue - name = resource_name - if match.rename: - name = match.rename - print( - f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'" - ) # pylint: disable=line-too-long + except grpc.aio.AioRpcError as e: + # check potential failure causes # FIXME: done in coordinator? 
+ for exporter, groups in sorted(self.resources.items()): + for group_name, group in sorted(groups.items()): + for resource_name, resource in sorted(group.items()): + resource_path = (exporter, group_name, resource.cls, resource_name) + if not resource.acquired: + continue + match = place.getmatch(resource_path) + if match is None: + continue + name = resource_name + if match.rename: + name = match.rename + print( + f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'" + ) # pylint: disable=line-too-long - raise ServerError(f"failed to acquire place {place.name}") + raise ServerError(e.details()) async def release(self): """Release a previously acquired place""" place = self.get_place() + user = self.getuser() if not place.acquired: - raise UserError(f"place {place.name} is not acquired") + raise UserError(f"place {place.name} is not acquired") # FIXME: done in coordinator? _, user = place.acquired.split("/") if user != self.getuser(): if not self.args.kick: raise UserError( f"place {place.name} is acquired by a different user ({place.acquired}), use --kick if you are sure" - ) # pylint: disable=line-too-long + ) # pylint: disable=line-too-long # FIXME: done in coordinator? 
print(f"warning: kicking user ({place.acquired})") - res = await self.call("org.labgrid.coordinator.release_place", place.name) - if not res: - raise ServerError(f"failed to release place {place.name}") + + request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name) + + try: + await self.stub.ReleasePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"released place {place.name}") async def release_from(self): """Release a place, but only if acquired by a specific user""" place = self.get_place() - res = await self.call( - "org.labgrid.coordinator.release_place_from", - place.name, - self.args.acquired, - ) - if not res: - raise ServerError(f"failed to release place {place.name}") + + request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name, fromuser=self.args.acquired) + + try: + await self.stub.ReleasePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"{self.args.acquired} has released place {place.name}") async def allow(self): """Allow another use access to a previously acquired place""" place = self.get_place() - if not place.acquired: - raise UserError(f"place {place.name} is not acquired") - _, user = place.acquired.split("/") - if user != self.getuser(): - raise UserError(f"place {place.name} is acquired by a different user ({place.acquired})") + user = self.getuser() if "/" not in self.args.user: raise UserError(f"user {self.args.user} must be in / format") - res = await self.call("org.labgrid.coordinator.allow_place", place.name, self.args.user) - if not res: - raise ServerError(f"failed to allow {self.args.user} for place {place.name}") + request = labgrid_coordinator_pb2.AllowPlaceRequest(placename=place.name, user=user) + + try: + await self.stub.ReleasePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise 
ServerError(e.details()) print(f"allowed {self.args.user} for place {place.name}") @@ -1277,14 +1394,32 @@ def write_image(self): raise UserError(e) async def create_reservation(self): - filters = " ".join(self.args.filters) prio = self.args.prio - res = await self.call("org.labgrid.coordinator.create_reservation", filters, prio=prio) - if res is None: - raise ServerError("failed to create reservation") - ((token, config),) = res.items() # we get a one-item dict - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + + fltr = {} + for pair in self.args.filters: + try: + k, v = pair.split("=") + except ValueError: + raise UserError(f"'{pair}' is not a vaild filter (must contain a '=')") + if not TAG_KEY.match(k): + raise UserError(f"Key '{k}' in filter '{pair}' is invalid") + if not TAG_KEY.match(v): + raise UserError(f"Value '{v}' in filter '{pair}' is invalid") + fltr[k] = v + + fltrs = { + "main": labgrid_coordinator_pb2.Reservation.Filter(filter=fltr), + } + + request = labgrid_coordinator_pb2.CreateReservationRequest(filters=fltrs, prio=prio) + + try: + response: labgrid_coordinator_pb2.CreateReservationResponse = await self.stub.CreateReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + res = Reservation.from_pb2(response.reservation) if self.args.shell: print(f"export LG_TOKEN={res.token}") else: @@ -1296,18 +1431,25 @@ async def create_reservation(self): await self._wait_reservation(res.token, verbose=False) async def cancel_reservation(self): - token = self.args.token - res = await self.call("org.labgrid.coordinator.cancel_reservation", token) - if not res: - raise ServerError(f"failed to cancel reservation {token}") + token: str = self.args.token + + request = labgrid_coordinator_pb2.CancelReservationRequest(token=token) - async def _wait_reservation(self, token, verbose=True): + try: + await self.stub.CancelReservation(request) + except grpc.aio.AioRpcError as e: + raise 
ServerError(e.details()) + + async def _wait_reservation(self, token: str, verbose=True): while True: - config = await self.call("org.labgrid.coordinator.poll_reservation", token) - if config is None: - raise ServerError("reservation not found") - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + request = labgrid_coordinator_pb2.PollReservationRequest(token=token) + + try: + response: labgrid_coordinator_pb2.PollReservationResponse = await self.stub.PollReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + res = Reservation.from_pb2(response.reservation) if verbose: res.show() if res.state is ReservationState.waiting: @@ -1320,10 +1462,15 @@ async def wait_reservation(self): await self._wait_reservation(token) async def print_reservations(self): - reservations = await self.call("org.labgrid.coordinator.get_reservations") - for token, config in sorted(reservations.items(), key=lambda x: (-x[1]["prio"], x[1]["created"])): # pylint: disable=line-too-long - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + request = labgrid_coordinator_pb2.GetReservationsRequest() + + try: + response: labgrid_coordinator_pb2.GetReservationsResponse = await self.stub.GetReservations(request) + reservations = [Reservation.from_pb2(x) for x in response.reservations] + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + for res in sorted(reservations, key=lambda x: (-x.prio, x.created)): print(f"Reservation '{res.token}':") res.show(level=1) @@ -1363,46 +1510,17 @@ def print_version(self): print(labgrid_version()) -def start_session(url, realm, extra): - from autobahn.asyncio.wamp import ApplicationRunner - +def start_session(url, extra, debug=False): loop = asyncio.get_event_loop() - ready = asyncio.Event() - - async def connected(session): # pylint: disable=unused-argument - ready.set() - - if not extra: - extra = {} - extra["loop"] = 
loop - extra["connected"] = connected - - session = [None] + if debug: + loop.set_debug(True) + session = None - def make(*args, **kwargs): - nonlocal session - session[0] = ClientSession(*args, **kwargs) - return session[0] + url = proxymanager.get_grpc_url(url, default_port=20408) - url = proxymanager.get_url(url, default_port=20408) - - runner = ApplicationRunner(url, realm=realm, extra=extra) - coro = runner.run(make, start_loop=False) - - _, protocol = loop.run_until_complete(coro) - - # there is no other notification when the WAMP connection setup times out, - # so we need to wait for one of these protocol futures to resolve - done, pending = loop.run_until_complete( - asyncio.wait({protocol.is_open, protocol.is_closed}, timeout=30, return_when=asyncio.FIRST_COMPLETED) - ) - if protocol.is_closed in done: - raise Error("connection closed during setup") - if protocol.is_open in pending: - raise Error("connection timed out during setup") - - loop.run_until_complete(ready.wait()) - return session[0] + session = ClientSession(url, loop, **extra) + loop.run_until_complete(session.start()) + return session def find_role_by_place(config, place): @@ -1489,10 +1607,10 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument( "-x", - "--crossbar", + "--coordinator", metavar="URL", type=str, - help="crossbar websocket URL (default: value from env variable LG_CROSSBAR, otherwise ws://127.0.0.1:20408/ws)", + help="coordinator URL (default: value from env variable LG_COORDINATOR, otherwise 127.0.0.1:20408)", ) parser.add_argument("-c", "--config", type=str, default=os.environ.get("LG_ENV"), help="config file") parser.add_argument("-p", "--place", type=str, default=place, help="place name/alias") @@ -1898,20 +2016,15 @@ def main(): signal.signal(signal.SIGTERM, lambda *_: sys.exit(0)) try: - crossbar_url = args.crossbar or env.config.get_option("crossbar_url") - except (AttributeError, KeyError): - # in case of no env or not set, use LG_CROSSBAR env variable or 
default
-            crossbar_url = os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws")
-
-        try:
-            crossbar_realm = env.config.get_option("crossbar_realm")
+            coordinator_url = args.coordinator or env.config.get_option("coordinator_address")
         except (AttributeError, KeyError):
-            # in case of no env, use LG_CROSSBAR_REALM env variable or default
-            crossbar_realm = os.environ.get("LG_CROSSBAR_REALM", "realm1")
+            # in case of no env or not set, use LG_COORDINATOR env variable or default
+            coordinator_url = os.environ.get("LG_COORDINATOR", "127.0.0.1:20408")
 
-        logging.debug('Starting session with "%s", realm: "%s"', crossbar_url, crossbar_realm)
+        logging.debug('Starting session with "%s"', coordinator_url)
+        session = start_session(coordinator_url, extra, args.debug)
+        logging.debug("Started session")
 
-        session = start_session(crossbar_url, crossbar_realm, extra)
         try:
             if asyncio.iscoroutinefunction(args.func):
                 if getattr(args.func, "needs_target", False):
@@ -1924,6 +2037,9 @@ def main():
             else:
                 args.func(session)
         finally:
+            logging.debug("Stopping session")
+            session.loop.run_until_complete(session.stop())
+            logging.debug("Stopping loop")
             session.loop.close()
     except (NoResourceFoundError, NoDriverFoundError, InvalidConfigError) as e:
         if args.debug:
@@ -1953,8 +2069,8 @@ def main():
             )  # pylint: disable=line-too-long
             exitcode = 1
-    except ConnectionError as e:
-        print(f"Could not connect to coordinator: {e}", file=sys.stderr)
+    except ServerError as e:
+        print(f"Server error: {e}", file=sys.stderr)
         exitcode = 1
     except InteractiveCommandError as e:
         if args.debug:
diff --git a/labgrid/remote/common.py b/labgrid/remote/common.py
index 2ea1d2f1a..0bd7e489f 100644
--- a/labgrid/remote/common.py
+++ b/labgrid/remote/common.py
@@ -1,14 +1,17 @@
-import socket
+import asyncio
 import time
 import enum
 import random
 import re
 import string
+import logging
 from datetime import datetime
 from fnmatch import fnmatchcase
 
 import attr
 
+from .generated import labgrid_coordinator_pb2
+
 __all__ = [
     "TAG_KEY",
"TAG_VAL", @@ -17,19 +20,50 @@ "Place", "ReservationState", "Reservation", - "enable_tcp_nodelay", - "monkey_patch_max_msg_payload_size_ws_option", ] TAG_KEY = re.compile(r"[a-z][a-z0-9_]+") TAG_VAL = re.compile(r"[a-z0-9_]?") +def set_map_from_dict(m, d): + for k, v in d.items(): + assert isinstance(k, str) + if v is None: + m[k].Clear() + elif isinstance(v, bool): + m[k].bool_value = v + elif isinstance(v, int): + if v < 0: + m[k].int_value = v + else: + m[k].uint_value = v + elif isinstance(v, float): + m[k].float_value = v + elif isinstance(v, str): + m[k].string_value = v + else: + raise ValueError(f"cannot translate {repr(v)} to MapValue") + + +def build_dict_from_map(m): + d = {} + for k, v in m.items(): + v: labgrid_coordinator_pb2.MapValue + kind = v.WhichOneof("kind") + if kind is None: + d[k] = None + else: + d[k] = getattr(v, kind) + return d + + @attr.s(eq=False) class ResourceEntry: data = attr.ib() # cls, params def __attrs_post_init__(self): + assert isinstance(self.data, dict) self.data.setdefault("acquired", None) self.data.setdefault("avail", False) @@ -84,6 +118,35 @@ def release(self): # ignore repeated releases self.data["acquired"] = None + def as_pb2(self): + msg = labgrid_coordinator_pb2.Resource() + msg.cls = self.cls + params = self.params.copy() + extra = params.pop("extra", {}) + set_map_from_dict(msg.params, params) + set_map_from_dict(msg.extra, extra) + if self.acquired is not None: + msg.acquired = self.acquired + msg.avail = self.avail + return msg + + @staticmethod + def data_from_pb2(pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.Resource) + data = { + "cls": pb2.cls, + "params": build_dict_from_map(pb2.params), + "acquired": pb2.acquired, + "avail": pb2.avail, + } + data["params"]["extra"] = build_dict_from_map(pb2.extra) + return data + + @classmethod + def from_pb2(cls, pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.Place) + return cls(cls.data_from_pb2(pb2)) + @attr.s(eq=True, repr=False, str=False) # This 
class requires eq=True, since we put the matches into a list and require @@ -133,6 +196,26 @@ def ismatch(self, resource_path): return True + def as_pb2(self): + return labgrid_coordinator_pb2.ResourceMatch( + exporter=self.exporter, + group=self.group, + cls=self.cls, + name=self.name, + rename=self.rename, + ) + + @classmethod + def from_pb2(cls, pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.ResourceMatch) + return cls( + exporter=pb2.exporter, + group=pb2.group, + cls=pb2.cls, + name=pb2.name if pb2.HasField("name") else None, + rename=pb2.rename, + ) + @attr.s(eq=False) class Place: @@ -170,14 +253,20 @@ def asdict(self): "reservation": self.reservation, } - def update(self, config): + def update_from_pb2(self, place_pb2): + # FIXME untangle this... + place = Place.from_pb2(place_pb2) fields = attr.fields_dict(type(self)) - for k, v in config.items(): + for k, v in place.asdict().items(): + print(f"{k}: {v}") assert k in fields if k == "name": # we cannot rename places assert v == self.name continue + if k == "matches": + self.matches = [ResourceMatch.from_pb2(m) for m in place_pb2.matches] + continue setattr(self, k, v) def show(self, level=0): @@ -241,6 +330,57 @@ def unmatched(self, resource_paths): def touch(self): self.changed = time.time() + def as_pb2(self): + try: + acquired_resources = [] + for resource in self.acquired_resources: + assert not isinstance(resource, (tuple, list)), "as_pb2() only implemented for coordinator" + assert len(resource.path) == 4 + path = "/".join(resource.path) + acquired_resources.append(path) + + place = labgrid_coordinator_pb2.Place() + place.name = self.name + place.aliases.extend(self.aliases) + place.comment = self.comment + place.matches.extend(m.as_pb2() for m in self.matches) + if self.acquired: + place.acquired = self.acquired + place.acquired_resources.extend(acquired_resources) + place.allowed.extend(self.allowed) + place.changed = self.changed + place.created = self.created + if self.reservation: + 
place.reservation = self.reservation + for key, value in self.tags.items(): + place.tags[key] = value + return place + except TypeError: + logging.exception("failed to convert place %s to protobuf", self) + raise + + @classmethod + def from_pb2(cls, pb2): + assert isinstance(pb2, labgrid_coordinator_pb2.Place) + acquired_resources = [] + for path in pb2.acquired_resources: + path = path.split("/") + assert len(path) == 4 + acquired_resources.append(path) + return cls( + name=pb2.name, + aliases=pb2.aliases, + comment=pb2.comment, + tags=dict(pb2.tags), + matches=[ResourceMatch.from_pb2(m) for m in pb2.matches], + acquired=pb2.acquired if pb2.HasField("acquired") else None, + acquired_resources=acquired_resources, + allowed=pb2.allowed, + created=pb2.created, + changed=pb2.changed, + reservation=pb2.reservation if pb2.HasField("reservation") else None, + ) + class ReservationState(enum.Enum): waiting = 0 @@ -304,44 +444,58 @@ def show(self, level=0): print(indent + f"created: {datetime.fromtimestamp(self.created)}") print(indent + f"timeout: {datetime.fromtimestamp(self.timeout)}") + def as_pb2(self): + res = labgrid_coordinator_pb2.Reservation() + res.owner = self.owner + res.token = self.token + res.state = self.state.value + res.prio = self.prio + for name, fltr in self.filters.items(): + res.filters[name].CopyFrom(labgrid_coordinator_pb2.Reservation.Filter(filter=fltr)) + if self.allocations: + # TODO: refactor to have only one place per filter group + assert len(self.allocations) == 1 + assert "main" in self.allocations + allocation = self.allocations["main"] + assert len(allocation) == 1 + res.allocations.update({"main": allocation[0]}) + res.created = self.created + res.timeout = self.timeout + return res -def enable_tcp_nodelay(session): - """ - asyncio/autobahn does not set TCP_NODELAY by default, so we need to do it - like this for now. 
- """ - s = session._transport.transport.get_extra_info("socket") - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True) - - -def monkey_patch_max_msg_payload_size_ws_option(): - """ - The default maxMessagePayloadSize in autobahn is 1M. For larger setups with a big number of - exported resources, this becomes the limiting factor. - Increase maxMessagePayloadSize in WampWebSocketClientFactory.setProtocolOptions() by monkey - patching it, so autobahn.asyncio.wamp.ApplicationRunner effectively sets the increased value. - - This function must be called before ApplicationRunner is instanciated. - """ - from autobahn.asyncio.websocket import WampWebSocketClientFactory - - original_method = WampWebSocketClientFactory.setProtocolOptions - - def set_protocol_options(*args, **kwargs): - new_max_message_payload_size = 10485760 - - # maxMessagePayloadSize given as positional arg - args = list(args) - try: - args[9] = max((args[9], new_max_message_payload_size)) - except IndexError: - pass - - # maxMessagePayloadSize given as kwarg - kwarg_name = "maxMessagePayloadSize" - if kwarg_name in kwargs and kwargs[kwarg_name] is not None: - kwargs[kwarg_name] = max((kwargs[kwarg_name], new_max_message_payload_size)) - - return original_method(*args, **kwargs) - - WampWebSocketClientFactory.setProtocolOptions = set_protocol_options + @classmethod + def from_pb2(cls, pb2: labgrid_coordinator_pb2.Reservation): + filters = {} + for name, fltr_pb2 in pb2.filters.items(): + filters[name] = dict(fltr_pb2.filter) + allocations = {} + for fltr_name, place_name in pb2.allocations.items(): + allocations[fltr_name] = [place_name] + return cls( + owner=pb2.owner, + token=pb2.token, + state=ReservationState(pb2.state), + prio=pb2.prio, + filters=filters, + allocations=allocations, + created=pb2.created, + timeout=pb2.timeout, + ) + + +async def queue_as_aiter(q): + try: + while True: + try: + item = await q.get() + except asyncio.CancelledError: + # gRPC doesn't like to receive exceptions 
from the request_iterator + return + if item is None: + return + yield item + q.task_done() + logging.debug("sent message %s", item) + except Exception: + logging.exception("error in queue_as_aiter") + raise diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index e3ba8210f..6ab4a2380 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -1,26 +1,30 @@ -"""The coordinator module coordinates exported resources and clients accessing them.""" - -# pylint: disable=no-member,unused-argument +#!/usr/bin/env python3 +import argparse +import logging +import os import asyncio -import sys import traceback -from collections import defaultdict -from os import environ -from pprint import pprint from enum import Enum from functools import wraps import attr -from autobahn import wamp -from autobahn.asyncio.wamp import ApplicationRunner, ApplicationSession -from autobahn.wamp.types import RegisterOptions - -from .common import * # pylint: disable=wildcard-import +import grpc +from grpc_reflection.v1alpha import reflection + +from .common import ( + ResourceEntry, + ResourceMatch, + Place, + Reservation, + ReservationState, + queue_as_aiter, + TAG_KEY, + TAG_VAL, +) from .scheduler import TagSet, schedule -from ..util import atomic_replace, yaml - - -monkey_patch_max_msg_payload_size_ws_option() +from .generated import labgrid_coordinator_pb2 +from .generated import labgrid_coordinator_pb2_grpc +from ..util import atomic_replace, labgrid_version, yaml class Action(Enum): @@ -34,19 +38,10 @@ class RemoteSession: """class encapsulating a session, used by ExporterSession and ClientSession""" coordinator = attr.ib() - session = attr.ib() - authid = attr.ib() - version = attr.ib(default="unknown", init=False) - - @property - def key(self): - """Key of the session""" - return self.session - - @property - def name(self): - """Name of the session""" - return self.authid.split("/", 1)[1] + peer = attr.ib() + name = attr.ib() + queue = 
attr.ib() + version = attr.ib() @attr.s(eq=False) @@ -56,26 +51,41 @@ class ExporterSession(RemoteSession): groups = attr.ib(default=attr.Factory(dict), init=False) - def set_resource(self, groupname, resourcename, resourcedata): + def set_resource(self, groupname, resourcename, resource): + """This is called when Exporters update resources or when they disconnect.""" + logging.info("set_resource %s %s %s", groupname, resourcename, resource) group = self.groups.setdefault(groupname, {}) old = group.get(resourcename) - if resourcedata and old: - old.update(resourcedata) - new = old - elif resourcedata and not old: - new = group[resourcename] = ResourceImport( - resourcedata, path=(self.name, groupname, resourcedata["cls"], resourcename) + if resource is not None: + new = ResourceImport( + data=ResourceImport.data_from_pb2(resource), path=(self.name, groupname, resource.cls, resourcename) ) - elif not resourcedata and old: - new = None - del group[resourcename] + if old: + old.data.update(new.data) + new = old + else: + group[resourcename] = new else: - assert not resourcedata and not old new = None + try: + del group[resourcename] + except KeyError: + pass - self.coordinator.publish( - "org.labgrid.coordinator.resource_changed", self.name, groupname, resourcename, new.asdict() if new else {} - ) + msg = labgrid_coordinator_pb2.ClientOutMessage() + update = msg.updates.add() + if new: + update.resource.CopyFrom(new.as_pb2()) + update.resource.path.exporter_name = self.name + update.resource.path.group_name = groupname + update.resource.path.resource_name = resourcename + else: + update.del_resource.exporter_name = self.name + update.del_resource.group_name = groupname + update.del_resource.resource_name = resourcename + + for client in self.coordinator.clients.values(): + client.queue.put_nowait(msg) if old and new: assert old is new @@ -99,7 +109,38 @@ def get_resources(self): @attr.s(eq=False) class ClientSession(RemoteSession): - pass + def subscribe_places(self): 
+ # send initial places + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + for place in self.coordinator.places.values(): + place: Place + out_msg.updates.add().place.CopyFrom(place.as_pb2()) + self.queue.put_nowait(out_msg) + + def subscribe_resources(self): + # collect initial resources + collected = [] + logging.debug("sending resources to %s", self) + for exporter in self.coordinator.exporters.values(): + logging.debug("sending resources %s", exporter) + exporter: ExporterSession + for groupname, group in exporter.groups.items(): + logging.debug("sending resources %s", groupname) + for resourcename, resource in group.items(): + logging.debug("sending resources %s", resourcename) + resource: ResourceImport + update = labgrid_coordinator_pb2.UpdateResponse() + update.resource.CopyFrom(resource.as_pb2()) + update.resource.path.exporter_name = exporter.name + update.resource.path.group_name = groupname + update.resource.path.resource_name = resourcename + collected.append(update) + # send batches + while collected: + batch, collected = collected[:100], collected[100:] + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + out_msg.updates.extend(batch) + self.queue.put_nowait(out_msg) @attr.s(eq=False) @@ -121,135 +162,42 @@ async def wrapper(self, *args, **kwargs): return wrapper -class CoordinatorComponent(ApplicationSession): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.lock = asyncio.Lock() +class ExporterCommand: + def __init__(self, request) -> None: + self.request = request + self.response = None + self.completed = asyncio.Event() - @locked - async def onConnect(self): - self.sessions = {} + def complete(self, response) -> None: + self.response = response + self.completed.set() + + async def wait(self): + await asyncio.wait_for(self.completed.wait(), 10) + + +class ExporterError(Exception): + pass + + +class Coordinator(labgrid_coordinator_pb2_grpc.CoordinatorServicer): + def __init__(self) -> None: self.places = 
{} self.reservations = {} self.poll_task = None self.save_scheduled = False + self.lock = asyncio.Lock() + self.exporters: dict[str, ExporterSession] = {} + self.clients: dict[str, ClientSession] = {} self.load() - self.save_later() - - enable_tcp_nodelay(self) - self.join( - self.config.realm, - authmethods=["anonymous"], - authid="coordinator", - authextra={"authid": "coordinator"}, - ) - - @locked - async def onJoin(self, details): - await self.subscribe(self.on_session_join, "wamp.session.on_join") - await self.subscribe(self.on_session_leave, "wamp.session.on_leave") - await self.register( - self.attach, "org.labgrid.coordinator.attach", options=RegisterOptions(details_arg="details") - ) - - # resources - await self.register( - self.set_resource, "org.labgrid.coordinator.set_resource", options=RegisterOptions(details_arg="details") - ) - await self.register(self.get_resources, "org.labgrid.coordinator.get_resources") - - # places - await self.register(self.add_place, "org.labgrid.coordinator.add_place") - await self.register(self.del_place, "org.labgrid.coordinator.del_place") - await self.register(self.add_place_alias, "org.labgrid.coordinator.add_place_alias") - await self.register(self.del_place_alias, "org.labgrid.coordinator.del_place_alias") - await self.register(self.set_place_tags, "org.labgrid.coordinator.set_place_tags") - await self.register(self.set_place_comment, "org.labgrid.coordinator.set_place_comment") - await self.register(self.add_place_match, "org.labgrid.coordinator.add_place_match") - await self.register(self.del_place_match, "org.labgrid.coordinator.del_place_match") - await self.register( - self.acquire_place, "org.labgrid.coordinator.acquire_place", options=RegisterOptions(details_arg="details") - ) - await self.register( - self.release_place, "org.labgrid.coordinator.release_place", options=RegisterOptions(details_arg="details") - ) - await self.register( - self.release_place_from, - "org.labgrid.coordinator.release_place_from", - 
options=RegisterOptions(details_arg="details"), - ) - await self.register( - self.allow_place, "org.labgrid.coordinator.allow_place", options=RegisterOptions(details_arg="details") - ) - await self.register(self.get_places, "org.labgrid.coordinator.get_places") - - # reservations - await self.register( - self.create_reservation, - "org.labgrid.coordinator.create_reservation", - options=RegisterOptions(details_arg="details"), - ) - await self.register( - self.cancel_reservation, - "org.labgrid.coordinator.cancel_reservation", - ) - await self.register( - self.poll_reservation, - "org.labgrid.coordinator.poll_reservation", - ) - await self.register( - self.get_reservations, - "org.labgrid.coordinator.get_reservations", - ) self.poll_task = asyncio.get_event_loop().create_task(self.poll()) - print("Coordinator ready.") - - @locked - async def onLeave(self, details): - await self.save() - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - super().onLeave(details) - - @locked - async def onDisconnect(self): - await self.save() - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - await asyncio.sleep(0.5) # give others a chance to clean up - async def _poll_step(self): # save changes if self.save_scheduled: await self.save() - # poll exporters - for session in list(self.sessions.values()): - if isinstance(session, ExporterSession): - fut = self.call(f"org.labgrid.exporter.{session.name}.version") - done, _ = await asyncio.wait([fut], timeout=5) - if not done: - print(f"kicking exporter ({session.key}/{session.name})") - await self.call("wamp.session.kill", session.key, message="timeout detected by coordinator") - print(f"cleaning up exporter ({session.key}/{session.name})") - await self.on_session_leave(session.key) - print(f"removed exporter ({session.key}/{session.name})") - continue - try: - session.version = done.pop().result() - except wamp.exception.ApplicationError as e: - if e.error == 
"wamp.error.no_such_procedure": - pass # old client - elif e.error == "wamp.error.canceled": - pass # disconnected - elif e.error == "wamp.error.no_such_session": - pass # client has already disconnected - else: - raise # update reservations self.schedule_reservations() @@ -265,9 +213,17 @@ async def poll(self): traceback.print_exc() def save_later(self): + logging.debug("Setting Save-later") self.save_scheduled = True + def _get_resources(self): + result = {} + for session in self.exporters.values(): + result[session.name] = session.get_resources() + return result + async def save(self): + logging.debug("Running Save") self.save_scheduled = False resources = self._get_resources() @@ -278,7 +234,9 @@ async def save(self): places = places.encode() loop = asyncio.get_event_loop() + logging.debug("Awaiting resources") await loop.run_in_executor(None, atomic_replace, "resources.yaml", resources) + logging.debug("Awaiting places") await loop.run_in_executor(None, atomic_replace, "places.yaml", places) def load(self): @@ -302,6 +260,59 @@ def load(self): self.places[placename] = place except FileNotFoundError: pass + logging.info("loaded %s place(s)", len(self.places)) + + async def ClientStream(self, request_iterator, context): + peer = context.peer() + logging.info("client connected: %s", peer) + assert peer not in self.clients + out_msg_queue = asyncio.Queue() + + async def request_task(): + name = None + version = None + try: + async for in_msg in request_iterator: + in_msg: labgrid_coordinator_pb2.ClientInMessage + logging.debug("client in_msg %s", in_msg) + kind = in_msg.WhichOneof("kind") + if kind == "sync": + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + out_msg.sync.id = in_msg.sync.id + out_msg_queue.put_nowait(out_msg) + elif kind == "startup": + version = in_msg.startup.version + name = in_msg.startup.name + session = self.clients[peer] = ClientSession(self, peer, name, out_msg_queue, version) + logging.debug("Received startup from %s with %s", 
name, version) + elif kind == "subscribe": + if in_msg.subscribe.all_places: + session.subscribe_places() + if in_msg.subscribe.all_resources: + session.subscribe_resources() + else: + logging.warning("received unknown kind %s from client %s (version %s)", kind, name, version) + logging.debug("client request_task done: %s", context.done()) + except Exception: + logging.exception("error in client message handler") + + runnning_request_task = asyncio.get_event_loop().create_task(request_task()) + + try: + async for out_msg in queue_as_aiter(out_msg_queue): + out_msg: labgrid_coordinator_pb2.ClientOutMessage + logging.debug("client output %s", out_msg) + yield out_msg + finally: + try: + session = self.clients.pop(peer) + except KeyError: + logging.info("Never received startup from peer %s that disconnected", peer) + return + + runnning_request_task.cancel() + await runnning_request_task + logging.debug("client aborted %s, cancelled: %s", session, context.cancelled()) def _add_default_place(self, name): if name in self.places: @@ -313,6 +324,11 @@ def _add_default_place(self, name): place.matches.append(ResourceMatch(exporter="*", group=name, cls="*")) self.places[name] = place + def get_exporter_by_name(self, name): + for exporter in self.exporters.values(): + if exporter.name == name: + return exporter + async def _update_acquired_places(self, action, resource, callback=True): """Update acquired places when resources are added or removed.""" if action not in [Action.ADD, Action.DEL]: @@ -340,151 +356,178 @@ async def _update_acquired_places(self, action, resource, callback=True): self._publish_place(place) def _publish_place(self, place): - self.publish("org.labgrid.coordinator.place_changed", place.name, place.asdict()) + msg = labgrid_coordinator_pb2.ClientOutMessage() + msg.updates.add().place.CopyFrom(place.as_pb2()) + + for client in self.clients.values(): + client.queue.put_nowait(msg) + + def _publish_resource(self, resource: ResourceImport): + msg = 
labgrid_coordinator_pb2.ClientOutMessage() + update = msg.updates.add() + update.resource.CopyFrom(resource.as_pb2()) + update.resource.path.exporter_name = resource.path[0] + update.resource.path.group_name = resource.path[1] + update.resource.path.resource_name = resource.path[3] + + for client in self.clients.values(): + client.queue.put_nowait(msg) + + async def ExporterStream(self, request_iterator, context): + peer = context.peer() + logging.info("exporter connected: %s", peer) + assert peer not in self.exporters + command_queue = asyncio.Queue() + pending_commands = [] + + out_msg = labgrid_coordinator_pb2.ExporterOutMessage() + out_msg.hello.version = labgrid_version() + yield out_msg + + async def request_task(): + name = None + version = None + try: + async for in_msg in request_iterator: + in_msg: labgrid_coordinator_pb2.ExporterInMessage + logging.debug("exporter in_msg %s", in_msg) + kind = in_msg.WhichOneof("kind") + if kind in "response": + cmd = pending_commands.pop(0) + cmd.complete(in_msg.response) + logging.debug("Command %s is done", cmd) + elif kind == "startup": + version = in_msg.startup.version + name = in_msg.startup.name + session = self.exporters[peer] = ExporterSession(self, peer, name, command_queue, version) + logging.debug("Exporters: %s", self.exporters) + logging.debug("Received startup from %s with %s", name, version) + elif kind == "resource": + logging.debug("Received resource from %s with %s", name, in_msg.resource) + action, resource = session.set_resource( + in_msg.resource.path.group_name, in_msg.resource.path.resource_name, in_msg.resource + ) + if action is Action.ADD: + async with self.lock: + self._add_default_place(in_msg.resource.path.group_name) + if action in (Action.ADD, Action.DEL): + async with self.lock: + await self._update_acquired_places(action, resource) + self.save_later() + else: + logging.warning("received unknown kind %s from exporter %s (version %s)", kind, name, version) - def _publish_resource(self, 
resource): - self.publish( - "org.labgrid.coordinator.resource_changed", - resource.path[0], # exporter name - resource.path[1], # group name - resource.path[3], # resource name - resource.asdict(), - ) + logging.debug("exporter request_task done: %s", context.done()) + except Exception: + logging.exception("error in exporter message handler") - @locked - async def on_session_join(self, session_details): - print("join") - pprint(session_details) - session = session_details["session"] - authid = session_details["authextra"].get("authid") or session_details["authid"] - if authid.startswith("client/"): - session = ClientSession(self, session, authid) - elif authid.startswith("exporter/"): - session = ExporterSession(self, session, authid) - else: - return - self.sessions[session.key] = session + runnning_request_task = asyncio.get_event_loop().create_task(request_task()) - @locked - async def on_session_leave(self, session_id): - print(f"leave ({session_id})") try: - session = self.sessions.pop(session_id) - except KeyError: - return - if isinstance(session, ExporterSession): + async for cmd in queue_as_aiter(command_queue): + logging.debug("exporter cmd %s", cmd) + out_msg = labgrid_coordinator_pb2.ExporterOutMessage() + out_msg.set_acquired_request.CopyFrom(cmd.request) + pending_commands.append(cmd) + yield out_msg + except asyncio.exceptions.CancelledError: + logging.info("exporter disconnected %s", context.peer()) + except Exception: + logging.exception("error in exporter command handler") + finally: + runnning_request_task.cancel() + await runnning_request_task + + try: + session = self.exporters.pop(peer) + except KeyError: + logging.info("Never received startup from peer %s that disconnected", peer) + return + for groupname, group in session.groups.items(): for resourcename in group.copy(): - action, resource = session.set_resource(groupname, resourcename, {}) + action, resource = session.set_resource(groupname, resourcename, None) await 
self._update_acquired_places(action, resource, callback=False) - self.save_later() - @locked - async def attach(self, name, details=None): - # TODO check if name is in use - session = self.sessions[details.caller] - session_details = self.sessions[session] - session_details["name"] = name - self.exporters[name] = defaultdict(dict) - - # not @locked because set_resource my be triggered by a acquire() call to - # an exporter, leading to a deadlock on acquire_place() - async def set_resource(self, groupname, resourcename, resourcedata, details=None): - """Called by exporter to create/update/remove resources.""" - session = self.sessions.get(details.caller) - if session is None: - return - assert isinstance(session, ExporterSession) - - groupname = str(groupname) - resourcename = str(resourcename) - # TODO check if acquired - print(details) - pprint(resourcedata) - action, resource = session.set_resource(groupname, resourcename, resourcedata) - if action is Action.ADD: - async with self.lock: - self._add_default_place(groupname) - if action in (Action.ADD, Action.DEL): - async with self.lock: - await self._update_acquired_places(action, resource) - self.save_later() - - def _get_resources(self): - result = {} - for session in self.sessions.values(): - if isinstance(session, ExporterSession): - result[session.name] = session.get_resources() - return result + logging.debug("exporter aborted %s, cancelled: %s", context.peer(), context.cancelled()) @locked - async def get_resources(self, details=None): - return self._get_resources() - - @locked - async def add_place(self, name, details=None): + async def AddPlace(self, request, context): + name = request.name if not name or not isinstance(name, str): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, "name was not a string") if name in self.places: - return False + await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Place {name} already exists") + logging.debug("Adding %s", name) place = Place(name) 
self.places[name] = place self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceResponse() @locked - async def del_place(self, name, details=None): + async def DeletePlace(self, request, context): + name = request.name if not name or not isinstance(name, str): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, "name was not a string") if name not in self.places: - return False + await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Place {name} does not exist") + logging.debug("Deleting %s", name) del self.places[name] - self.publish("org.labgrid.coordinator.place_changed", name, {}) + msg = labgrid_coordinator_pb2.ClientOutMessage() + msg.updates.add().del_place = name + for client in self.clients.values(): + client.queue.put_nowait(msg) self.save_later() - return True + return labgrid_coordinator_pb2.DeletePlaceResponse() @locked - async def add_place_alias(self, placename, alias, details=None): + async def AddPlaceAlias(self, request, context): + placename = request.placename + alias = request.alias try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") place.aliases.add(alias) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceAliasResponse() @locked - async def del_place_alias(self, placename, alias, details=None): + async def DeletePlaceAlias(self, request, context): + placename = request.placename + alias = request.alias try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") try: place.aliases.remove(alias) except ValueError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Failed to remove {alias} from {placename}") place.touch() self._publish_place(place) self.save_later() - return True + return 
labgrid_coordinator_pb2.DeletePlaceAliasResponse() @locked - async def set_place_tags(self, placename, tags, details=None): + async def SetPlaceTags(self, request, context): + placename = request.placename + tags = dict(request.tags) try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") assert isinstance(tags, dict) for k, v in tags.items(): assert isinstance(k, str) assert isinstance(v, str) if not TAG_KEY.match(k): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Key {k} in {tags} is invalid") if not TAG_VAL.match(v): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Value {v} in {tags} is invalid") for k, v in tags.items(): if not v: try: @@ -496,50 +539,60 @@ async def set_place_tags(self, placename, tags, details=None): place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.SetPlaceTagsResponse() @locked - async def set_place_comment(self, placename, comment, details=None): + async def SetPlaceComment(self, request, context): + placename = request.placename + comment = request.comment try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") place.comment = comment place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.SetPlaceCommentResponse() @locked - async def add_place_match(self, placename, pattern, rename=None, details=None): + async def AddPlaceMatch(self, request, context): + placename = request.placename + pattern = request.pattern + rename = request.rename if request.HasField("rename") else None try: place = self.places[placename] except KeyError: - return False - match = ResourceMatch(*pattern.split("/"), rename=rename) - if match in place.matches: - return False - place.matches.append(match) + 
await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") + rm = ResourceMatch(*pattern.split("/"), rename=rename) + if rm in place.matches: + await context.abort( + grpc.StatusCode.FAILED_PRECONDITION, f"Match {rm} already exists" + ) # FIXME: functional change, previously client skipped match while complaining + place.matches.append(rm) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceMatchResponse() @locked - async def del_place_match(self, placename, pattern, rename=None, details=None): + async def DeletePlaceMatch(self, request, context): + placename = request.placename + pattern = request.pattern + rename = request.rename if request.HasField("rename") else None try: place = self.places[placename] except KeyError: - return False - match = ResourceMatch(*pattern.split("/"), rename=rename) + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") + rm = ResourceMatch(*pattern.split("/"), rename=rename) try: - place.matches.remove(match) + place.matches.remove(rm) except ValueError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Match {rm} does not exist in {placename}") place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.DeletePlaceMatchResponse() async def _acquire_resources(self, place, resources): resources = resources.copy() # we may modify the list @@ -554,12 +607,18 @@ async def _acquire_resources(self, place, resources): for resource in resources: # this triggers an update from the exporter which is published # to the clients - await self.call( - f"org.labgrid.exporter.{resource.path[0]}.acquire", resource.path[1], resource.path[3], place.name - ) + request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest() + request.group_name = resource.path[1] + request.resource_name = resource.path[3] + request.place_name = place.name + cmd = 
ExporterCommand(request) + self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd) + await cmd.wait() + if not cmd.response.success: + raise ExporterError(f"failed to acquire {resource}") acquired.append(resource) - except: - print(f"failed to acquire {resource}", file=sys.stderr) + except Exception: + logging.exception("failed to acquire %s", resource) # cleanup await self._release_resources(place, acquired) return False @@ -583,37 +642,48 @@ async def _release_resources(self, place, resources, callback=True): # this triggers an update from the exporter which is published # to the clients if callback: - await self.call( - f"org.labgrid.exporter.{resource.path[0]}.release", resource.path[1], resource.path[3] - ) - except: - print(f"failed to release {resource}", file=sys.stderr) + request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest() + request.group_name = resource.path[1] + request.resource_name = resource.path[3] + # request.place_name is left unset to indicate release + cmd = ExporterCommand(request) + self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd) + await cmd.wait() + if not cmd.response.success: + raise ExporterError(f"failed to release {resource}") + except (ExporterError, TimeoutError): + logging.exception("failed to release %s", resource) # at leaset try to notify the clients try: self._publish_resource(resource) except: - pass + logging.exception("failed to publish released resource %s", resource) @locked - async def acquire_place(self, name, details=None): - print(details) + async def AcquirePlace(self, request, context): + peer = context.peer() + name = request.placename + try: + username = self.clients[peer].name + except KeyError: + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Peer {peer} does not have a valid session") + print(request) + try: place = self.places[name] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {name} does not exist") if
place.acquired: - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {name} is already acquired") if place.reservation: res = self.reservations[place.reservation] - if not res.owner == self.sessions[details.caller].name: - return False + if not res.owner == username: + await context.abort(grpc.StatusCode.PERMISSION_DENIED, f"Place {name} was not reserved for {username}") # FIXME use the session object instead? or something else which # survives disconnecting clients? - place.acquired = self.sessions[details.caller].name + place.acquired = username resources = [] - for _, session in sorted(self.sessions.items()): - if not isinstance(session, ExporterSession): - continue + for _, session in sorted(self.exporters.items()): for _, group in sorted(session.groups.items()): for _, resource in sorted(group.items()): if not place.hasmatch(resource.path): @@ -622,23 +692,40 @@ async def acquire_place(self, name, details=None): if not await self._acquire_resources(place, resources): # revert earlier change place.acquired = None - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Failed to acquire resources for place {name}") place.touch() self._publish_place(place) self.save_later() self.schedule_reservations() print(f"{place.name}: place acquired by {place.acquired}") - return True + return labgrid_coordinator_pb2.AcquirePlaceResponse() @locked - async def release_place(self, name, details=None): - print(details) + async def ReleasePlace(self, request, context): + name = request.placename + peer = context.peer() + try: + username = self.clients[peer].name + except KeyError: + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Peer {peer} does not have a valid session") + print(request) + fromuser = request.fromuser if request.HasField("fromuser") else None try: place = self.places[name] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {name} does not exist") if not 
place.acquired: - return False + if fromuser: + return labgrid_coordinator_pb2.ReleasePlaceResponse() + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {name} is not acquired") + if fromuser and place.acquired != fromuser: + return labgrid_coordinator_pb2.ReleasePlaceResponse() + if username != place.acquired: + await context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + f"Place {name} is not acquired by {username}, " + f"acquired by {place.acquired}", + ) await self._release_resources(place, place.acquired_resources) @@ -649,66 +736,43 @@ async def release_place(self, name, details=None): self.save_later() self.schedule_reservations() print(f"{place.name}: place released") - return True + return labgrid_coordinator_pb2.ReleasePlaceResponse() @locked - async def release_place_from(self, name, acquired, details=None): - """ - Release a place, but only if acquired by a specific user - - Note that unlike the release_place API, this function returns True as - long as the specific place is not acquired by the specified user. This - may mean that the place was not acquired at all, is acquired by - another, or was released; which of these states cannot be inferred from - the return code. This is intentional as the purpose of the command is - to validate that the specified user no longer owns the place, and the - exact state is irrelevant as long as that condition is met.
- - Returns: - bool: True if the user no longer owns the place, or False if there - was an error that prevented releasing the place - """ + async def AllowPlace(self, request, context): + placename = request.placename + user = request.user + peer = context.peer() try: - place = self.places[name] + username = self.clients[peer].name except KeyError: - return False - if not place.acquired: - return True - if place.acquired != acquired: - return True - - await self._release_resources(place, place.acquired_resources) - - place.acquired = None - place.allowed = set() - place.touch() - self._publish_place(place) - self.save_later() - self.schedule_reservations() - return True - - @locked - async def allow_place(self, name, user, details=None): + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Peer {peer} does not have a valid session") try: - place = self.places[name] + place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") if not place.acquired: - return False - if not place.acquired == self.sessions[details.caller].name: - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {placename} is not acquired") + if not place.acquired == username: + await context.abort( + grpc.StatusCode.FAILED_PRECONDITION, f"Place {placename} is not acquired by {username}" + ) place.allowed.add(user) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AllowPlaceResponse() def _get_places(self): return {k: v.asdict() for k, v in self.places.items()} @locked - async def get_places(self, details=None): - return self._get_places() + async def GetPlaces(self, unused_request, unused_context): + logging.debug("GetPlaces") + try: + return labgrid_coordinator_pb2.GetPlacesResponse(places=[x.as_pb2() for x in self.places.values()]) + except Exception: + logging.exception("error during get places") def 
schedule_reservations(self): # The primary information is stored in the reservations and the places @@ -816,54 +880,129 @@ def schedule_reservations(self): self._publish_place(place) @locked - async def create_reservation(self, spec, prio=0.0, details=None): - filter_ = {} - for pair in spec.split(): - try: - k, v = pair.split("=") - except ValueError: - return None - if not TAG_KEY.match(k): - return None - if not TAG_VAL.match(v): - return None - filter_[k] = v - - filters = {"main": filter_} # currently, only one group is implemented - - owner = self.sessions[details.caller].name - res = Reservation(owner=owner, prio=prio, filters=filters) + async def CreateReservation(self, request: labgrid_coordinator_pb2.CreateReservationRequest, context): + peer = context.peer() + + fltrs = {} + for name, fltr_pb in request.filters.items(): + if name != "main": + await context.abort( + grpc.StatusCode.UNIMPLEMENTED, "Reservations for multiple groups are not implemented yet" + ) + fltr = fltrs[name] = {} + for k, v in fltr_pb.filter.items(): + if not TAG_KEY.match(k): + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Key {k} is invalid") + if not TAG_VAL.match(v): + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Value {v} is invalid") + fltr[k] = v + + owner = self.clients[peer].name + res = Reservation(owner=owner, prio=request.prio, filters=fltrs) self.reservations[res.token] = res self.schedule_reservations() - return {res.token: res.asdict()} + return labgrid_coordinator_pb2.CreateReservationResponse(reservation=res.as_pb2()) @locked - async def cancel_reservation(self, token, details=None): - if not isinstance(token, str): - return False + async def CancelReservation(self, request: labgrid_coordinator_pb2.CancelReservationRequest, context): + token = request.token + if not isinstance(token, str) or not token: + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Invalid token {token}") if token not in self.reservations: - return False + await 
context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Reservation {token} does not exist") del self.reservations[token] self.schedule_reservations() - return True + return labgrid_coordinator_pb2.CancelReservationResponse() @locked - async def poll_reservation(self, token, details=None): + async def PollReservation(self, request: labgrid_coordinator_pb2.PollReservationRequest, context): + token = request.token try: res = self.reservations[token] except KeyError: - return None + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Reservation {token} does not exist") res.refresh() - return res.asdict() + return labgrid_coordinator_pb2.PollReservationResponse(reservation=res.as_pb2()) @locked - async def get_reservations(self, details=None): - return {k: v.asdict() for k, v in self.reservations.items()} + async def GetReservations(self, request: labgrid_coordinator_pb2.GetReservationsRequest, context): + reservations = [x.as_pb2() for x in self.reservations.values()] + return labgrid_coordinator_pb2.GetReservationsResponse(reservations=reservations) + + +async def serve(listen, cleanup) -> None: + server = grpc.aio.server( + options=[ + ("grpc.keepalive_time_ms", 30000), # Send keepalive ping every 30 seconds + ( + "grpc.keepalive_timeout_ms", + 10000, + ), # Wait 10 seconds for ping ack before considering the connection dead + ("grpc.http2.min_time_between_pings_ms", 15000), # Minimum amount of time between pings + ("grpc.http2.max_pings_without_data", 0), # Allow pings even without active streams + ("grpc.keepalive_permit_without_calls", 1), # Allow keepalive pings even when there are no calls + ], + ) + coordinator = Coordinator() + labgrid_coordinator_pb2_grpc.add_CoordinatorServicer_to_server(coordinator, server) + # enable reflection for use with grpcurl + reflection.enable_server_reflection( + ( + labgrid_coordinator_pb2.DESCRIPTOR.services_by_name["Coordinator"].full_name, + reflection.SERVICE_NAME, + ), + server, + ) + # optionally enable channelz for 
use with grpcdebug + try: + from grpc_channelz.v1 import channelz + + channelz.add_channelz_servicer(server) + logging.info("Enabled channelz support") + except ImportError: + logging.info("Module grpcio-channelz not available") + + server.add_insecure_port(listen) + logging.debug("Starting server") + await server.start() + + async def server_graceful_shutdown(): + logging.info("Starting graceful shutdown...") + # Shuts down the server with 0 seconds of grace period. During the + # grace period, the server won't accept new connections and allow + # existing RPCs to continue within the grace period. + await server.stop(5) + + cleanup.append(server_graceful_shutdown()) + logging.info("Coordinator ready") + await server.wait_for_termination() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "-l", + "--listen", + metavar="HOST:PORT", + type=str, + default=os.environ.get("LG_COORDINATOR", "[::]:20408"), + help="coordinator listening host and port", + ) + parser.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug mode") + + args = parser.parse_args() + + logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) + + loop = asyncio.get_event_loop() + cleanup = [] + loop.set_debug(True) + try: + loop.run_until_complete(serve(args.listen, cleanup)) + finally: + if cleanup: + loop.run_until_complete(*cleanup) if __name__ == "__main__": - runner = ApplicationRunner( - url=environ.get("WS", "ws://127.0.0.1:20408/ws"), - realm="realm1", - ) - runner.run(CoordinatorComponent) + main() diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 03a8cd26c..8da5505d5 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -7,7 +7,6 @@ import sys import os import os.path -import time import traceback import shutil import subprocess @@ -15,17 +14,16 @@ from pathlib import Path from typing import Dict, Type from socket import gethostname, getfqdn + import attr -from 
autobahn.asyncio.wamp import ApplicationRunner, ApplicationSession +import grpc from .config import ResourceConfig -from .common import ResourceEntry, enable_tcp_nodelay, monkey_patch_max_msg_payload_size_ws_option +from .common import ResourceEntry, queue_as_aiter +from .generated import labgrid_coordinator_pb2, labgrid_coordinator_pb2_grpc from ..util import get_free_port, labgrid_version -monkey_patch_max_msg_payload_size_ws_option() - -__version__ = labgrid_version() exports: Dict[str, Type[ResourceEntry]] = {} reexec = False @@ -112,10 +110,10 @@ def start(self): start_params = self._get_start_params() try: self._start(start_params) - except Exception: + except Exception as e: self.broken = "start failed" self.logger.exception("failed to start with %s", start_params) - raise + raise BrokenResourceError("Failed to start resource") from e self.start_params = start_params def stop(self): @@ -773,111 +771,149 @@ def _get_params(self): exports["YKUSHPowerPort"] = YKUSHPowerPortExport -class ExporterSession(ApplicationSession): - def onConnect(self): +class Exporter: + def __init__(self, config) -> None: """Set up internal datastructures on successful connection: - Setup loop, name, authid and address - Join the coordinator as an exporter""" - self.loop = self.config.extra["loop"] - self.name = self.config.extra["name"] - self.hostname = self.config.extra["hostname"] - self.isolated = self.config.extra["isolated"] - self.address = self._transport.transport.get_extra_info("sockname")[0] - self.checkpoint = time.monotonic() + self.config = config + self.loop = asyncio.get_event_loop() + self.name = config["name"] + self.hostname = config["hostname"] + self.isolated = config["isolated"] + + self.channel = grpc.aio.insecure_channel(config["coordinator"]) + self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) + self.out_queue = asyncio.Queue() + self.pump_task = None + self.poll_task = None self.groups = {} - enable_tcp_nodelay(self) - self.join( - 
self.config.realm, - authmethods=["anonymous", "ticket"], - authid=f"exporter/{self.name}", - authextra={"authid": f"exporter/{self.name}"}, - ) - - def onChallenge(self, challenge): - """Function invoked on received challege, returns just a dummy ticket - at the moment, authentication is not supported yet""" - logging.warning("Ticket authentication is deprecated. Please update your coordinator.") - return "dummy-ticket" - - async def onJoin(self, details): - """On successful join: - - export available resources - - bail out if we are unsuccessful - """ - print(details) - - prefix = f"org.labgrid.exporter.{self.name}" - try: - await self.register(self.acquire, f"{prefix}.acquire") - await self.register(self.release, f"{prefix}.release") - await self.register(self.version, f"{prefix}.version") - - config_template_env = { - "env": os.environ, - "isolated": self.isolated, - "hostname": self.hostname, - "name": self.name, - } - resource_config = ResourceConfig(self.config.extra["resources"], config_template_env) - for group_name, group in resource_config.data.items(): - group_name = str(group_name) - for resource_name, params in group.items(): - resource_name = str(resource_name) - if resource_name == "location": - continue - if params is None: - continue - cls = params.pop("cls", resource_name) - - # this may call back to acquire the resource immediately - await self.add_resource(group_name, resource_name, cls, params) - self.checkpoint = time.monotonic() - - except Exception: # pylint: disable=broad-except - traceback.print_exc(file=sys.stderr) - self.loop.stop() - return + async def run(self) -> None: + self.pump_task = self.loop.create_task(self.message_pump()) + self.send_started() + + config_template_env = { + "env": os.environ, + "isolated": self.isolated, + "hostname": self.hostname, + "name": self.name, + } + resource_config = ResourceConfig(self.config["resources"], config_template_env) + for group_name, group in resource_config.data.items(): + group_name = 
str(group_name) + for resource_name, params in group.items(): + resource_name = str(resource_name) + if resource_name == "location": + continue + if params is None: + continue + cls = params.pop("cls", resource_name) + + # this may call back to acquire the resource immediately + await self.add_resource(group_name, resource_name, cls, params) + # flush queued message + await self.out_queue.join() + + logging.info("creating poll task") self.poll_task = self.loop.create_task(self.poll()) - async def onLeave(self, details): - """Cleanup after leaving the coordinator connection""" - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - super().onLeave(details) - - async def onDisconnect(self): - print("connection lost", file=sys.stderr) - global reexec - reexec = True - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - await asyncio.sleep(0.5) # give others a chance to clean up - self.loop.stop() + (done, pending) = await asyncio.wait((self.pump_task, self.poll_task), return_when=asyncio.FIRST_COMPLETED) + logging.debug("task(s) %s exited, shutting down exporter", done) + for task in pending: + task.cancel() + + await self.pump_task + await self.poll_task + + def send_started(self): + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.startup.version = labgrid_version() + msg.startup.name = self.name + self.out_queue.put_nowait(msg) + + async def message_pump(self): + got_message = False + try: + async for out_message in self.stub.ExporterStream(queue_as_aiter(self.out_queue)): + got_message = True + logging.debug("received message %s", out_message) + kind = out_message.WhichOneof("kind") + if kind == "hello": + logging.info("connected to exporter version %s", out_message.hello.version) + elif kind == "set_acquired_request": + logging.debug("acquire request") + success = False + reason = None + try: + if out_message.set_acquired_request.place_name: + await self.acquire( + 
out_message.set_acquired_request.group_name, + out_message.set_acquired_request.resource_name, + out_message.set_acquired_request.place_name, + ) + else: + await self.release( + out_message.set_acquired_request.group_name, + out_message.set_acquired_request.resource_name, + ) + success = True + except BrokenResourceError as e: + reason = e.args[0] + finally: + in_message = labgrid_coordinator_pb2.ExporterInMessage() + in_message.response.success = success + if reason: + in_message.response.reason = reason + logging.debug("queing %s", in_message) + self.out_queue.put_nowait(in_message) + logging.debug("queued %s", in_message) + else: + logging.debug("unknown request: %s", kind) + except grpc.aio.AioRpcError as e: + self.out_queue.put_nowait(None) # let the sender side exit gracefully + if e.code() == grpc.StatusCode.UNAVAILABLE: + if got_message: + logging.error("coordinator became unavailable: %s", e.details()) + global reexec + reexec = True + else: + logging.error("coordinator is unavailable: %s", e.details()) + else: + logging.exception("unexpected grpc error in coordinator message pump task") + except Exception: + self.out_queue.put_nowait(None) # let the sender side exit gracefully + logging.exception("error in coordinator message pump") + + # only send command response when the other updates have left the queue + # perhaps with queue join/task_done + # this should be a command from the coordinator async def acquire(self, group_name, resource_name, place_name): - resource = self.groups[group_name][resource_name] + resource = self.groups.get(group_name, {}).get(resource_name) + if resource is None: + logging.error("acquire request for unknown resource %s/%s by %s", group_name, resource_name, place_name) + return + try: resource.acquire(place_name) finally: await self.update_resource(group_name, resource_name) async def release(self, group_name, resource_name): - resource = self.groups[group_name][resource_name] + resource = self.groups.get(group_name, 
{}).get(resource_name) + if resource is None: + logging.error("release request for unknown resource %s/%s", group_name, resource_name) + return + try: resource.release() finally: await self.update_resource(group_name, resource_name) - async def version(self): - self.checkpoint = time.monotonic() - return __version__ - async def _poll_step(self): for group_name, group in self.groups.items(): for resource_name, resource in group.items(): @@ -904,10 +940,6 @@ async def poll(self): break except Exception: # pylint: disable=broad-except traceback.print_exc(file=sys.stderr) - age = time.monotonic() - self.checkpoint - if age > 300: - print(f"missed checkpoint, exiting (last was {age} seconds ago)", file=sys.stderr) - self.disconnect() async def add_resource(self, group_name, resource_name, cls, params): """Add a resource to the exporter and update status on the coordinator""" @@ -934,20 +966,28 @@ async def add_resource(self, group_name, resource_name, cls, params): async def update_resource(self, group_name, resource_name): """Update status on the coordinator""" resource = self.groups[group_name][resource_name] - data = resource.asdict() - print(data) - await self.call("org.labgrid.coordinator.set_resource", group_name, resource_name, data) + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.resource.CopyFrom(resource.as_pb2()) + msg.resource.path.group_name = group_name + msg.resource.path.resource_name = resource_name + self.out_queue.put_nowait(msg) + logging.info("queued update for resource %s/%s", group_name, resource_name) + + +async def amain(config) -> bool: + exporter = Exporter(config) + await exporter.run() def main(): parser = argparse.ArgumentParser() parser.add_argument( - "-x", - "--crossbar", - metavar="URL", + "-c", + "--coordinator", + metavar="HOST:PORT", type=str, - default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), - help="crossbar websocket URL", + default=os.environ.get("LG_COORDINATOR", "127.0.0.1:20408"), + help="coordinator 
host and port", ) parser.add_argument( "-n", @@ -979,29 +1019,22 @@ def main(): args = parser.parse_args() - level = "debug" if args.debug else "info" + logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) - extra = { + config = { "name": args.name or gethostname(), "hostname": args.hostname or (getfqdn() if args.fqdn else gethostname()), "resources": args.resources, + "coordinator": args.coordinator, "isolated": args.isolated, } - crossbar_url = args.crossbar - crossbar_realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + print(f"exporter name: {config['name']}") + print(f"exporter hostname: {config['hostname']}") + print(f"resource config file: {config['resources']}") - print(f"crossbar URL: {crossbar_url}") - print(f"crossbar realm: {crossbar_realm}") - print(f"exporter name: {extra['name']}") - print(f"exporter hostname: {extra['hostname']}") - print(f"resource config file: {extra['resources']}") + asyncio.run(amain(config), debug=bool(args.debug)) - extra["loop"] = loop = asyncio.get_event_loop() - if args.debug: - loop.set_debug(True) - runner = ApplicationRunner(url=crossbar_url, realm=crossbar_realm, extra=extra) - runner.run(ExporterSession, log_level=level) if reexec: exit(100) diff --git a/labgrid/remote/generated/generate-proto.sh b/labgrid/remote/generated/generate-proto.sh new file mode 100755 index 000000000..d160b0c74 --- /dev/null +++ b/labgrid/remote/generated/generate-proto.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -ex +python3 -m grpc_tools.protoc -I../proto --python_out=. --pyi_out=. --grpc_python_out=. ../proto/labgrid-coordinator.proto +sed -i "s/import labgrid/from . 
import labgrid/g" labgrid_coordinator_pb2_grpc.py diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2.py b/labgrid/remote/generated/labgrid_coordinator_pb2.py new file mode 100644 index 000000000..9ef7819d1 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: labgrid-coordinator.proto +# Protobuf Python Version: 5.26.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19labgrid-coordinator.proto\x12\x07labgrid\"\x8a\x01\n\x0f\x43lientInMessage\x12\x1d\n\x04sync\x18\x01 \x01(\x0b\x32\r.labgrid.SyncH\x00\x12\'\n\x07startup\x18\x02 \x01(\x0b\x32\x14.labgrid.StartupDoneH\x00\x12\'\n\tsubscribe\x18\x03 \x01(\x0b\x32\x12.labgrid.SubscribeH\x00\x42\x06\n\x04kind\"\x12\n\x04Sync\x12\n\n\x02id\x18\x01 \x01(\x04\",\n\x0bStartupDone\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"r\n\tSubscribe\x12\x1b\n\x0eis_unsubscribe\x18\x01 \x01(\x08H\x01\x88\x01\x01\x12\x14\n\nall_places\x18\x02 \x01(\x08H\x00\x12\x17\n\rall_resources\x18\x03 \x01(\x08H\x00\x42\x06\n\x04kindB\x11\n\x0f_is_unsubscribe\"g\n\x10\x43lientOutMessage\x12 \n\x04sync\x18\x01 \x01(\x0b\x32\r.labgrid.SyncH\x00\x88\x01\x01\x12(\n\x07updates\x18\x02 \x03(\x0b\x32\x17.labgrid.UpdateResponseB\x07\n\x05_sync\"\xa5\x01\n\x0eUpdateResponse\x12%\n\x08resource\x18\x01 \x01(\x0b\x32\x11.labgrid.ResourceH\x00\x12.\n\x0c\x64\x65l_resource\x18\x02 \x01(\x0b\x32\x16.labgrid.Resource.PathH\x00\x12\x1f\n\x05place\x18\x03 \x01(\x0b\x32\x0e.labgrid.PlaceH\x00\x12\x13\n\tdel_place\x18\x04 
\x01(\tH\x00\x42\x06\n\x04kind\"\x9a\x01\n\x11\x45xporterInMessage\x12%\n\x08resource\x18\x01 \x01(\x0b\x32\x11.labgrid.ResourceH\x00\x12\'\n\x07startup\x18\x02 \x01(\x0b\x32\x14.labgrid.StartupDoneH\x00\x12-\n\x08response\x18\x03 \x01(\x0b\x32\x19.labgrid.ExporterResponseH\x00\x42\x06\n\x04kind\"\x9e\x03\n\x08Resource\x12$\n\x04path\x18\x01 \x01(\x0b\x32\x16.labgrid.Resource.Path\x12\x0b\n\x03\x63ls\x18\x02 \x01(\t\x12-\n\x06params\x18\x03 \x03(\x0b\x32\x1d.labgrid.Resource.ParamsEntry\x12+\n\x05\x65xtra\x18\x04 \x03(\x0b\x32\x1c.labgrid.Resource.ExtraEntry\x12\x10\n\x08\x61\x63quired\x18\x05 \x01(\t\x12\r\n\x05\x61vail\x18\x06 \x01(\x08\x1a_\n\x04Path\x12\x1a\n\rexporter_name\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\ngroup_name\x18\x02 \x01(\t\x12\x15\n\rresource_name\x18\x03 \x01(\tB\x10\n\x0e_exporter_name\x1a@\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.labgrid.MapValue:\x02\x38\x01\x1a?\n\nExtraEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.labgrid.MapValue:\x02\x38\x01\"\x82\x01\n\x08MapValue\x12\x14\n\nbool_value\x18\x01 \x01(\x08H\x00\x12\x13\n\tint_value\x18\x02 \x01(\x03H\x00\x12\x14\n\nuint_value\x18\x03 \x01(\x04H\x00\x12\x15\n\x0b\x66loat_value\x18\x04 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x05 \x01(\tH\x00\x42\x06\n\x04kind\"C\n\x10\x45xporterResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x13\n\x06reason\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_reason\"\x18\n\x05Hello\x12\x0f\n\x07version\x18\x01 \x01(\t\"\x82\x01\n\x12\x45xporterOutMessage\x12\x1f\n\x05hello\x18\x01 \x01(\x0b\x32\x0e.labgrid.HelloH\x00\x12\x43\n\x14set_acquired_request\x18\x02 \x01(\x0b\x32#.labgrid.ExporterSetAcquiredRequestH\x00\x42\x06\n\x04kind\"o\n\x1a\x45xporterSetAcquiredRequest\x12\x12\n\ngroup_name\x18\x01 \x01(\t\x12\x15\n\rresource_name\x18\x02 \x01(\t\x12\x17\n\nplace_name\x18\x03 
\x01(\tH\x00\x88\x01\x01\x42\r\n\x0b_place_name\"\x1f\n\x0f\x41\x64\x64PlaceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x12\n\x10\x41\x64\x64PlaceResponse\"\"\n\x12\x44\x65letePlaceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x15\n\x13\x44\x65letePlaceResponse\"\x12\n\x10GetPlacesRequest\"3\n\x11GetPlacesResponse\x12\x1e\n\x06places\x18\x01 \x03(\x0b\x32\x0e.labgrid.Place\"\xd2\x02\n\x05Place\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x61liases\x18\x02 \x03(\t\x12\x0f\n\x07\x63omment\x18\x03 \x01(\t\x12&\n\x04tags\x18\x04 \x03(\x0b\x32\x18.labgrid.Place.TagsEntry\x12\'\n\x07matches\x18\x05 \x03(\x0b\x32\x16.labgrid.ResourceMatch\x12\x15\n\x08\x61\x63quired\x18\x06 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\x12\x61\x63quired_resources\x18\x07 \x03(\t\x12\x0f\n\x07\x61llowed\x18\x08 \x03(\t\x12\x0f\n\x07\x63reated\x18\t \x01(\x01\x12\x0f\n\x07\x63hanged\x18\n \x01(\x01\x12\x18\n\x0breservation\x18\x0b \x01(\tH\x01\x88\x01\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0b\n\t_acquiredB\x0e\n\x0c_reservation\"y\n\rResourceMatch\x12\x10\n\x08\x65xporter\x18\x01 \x01(\t\x12\r\n\x05group\x18\x02 \x01(\t\x12\x0b\n\x03\x63ls\x18\x03 \x01(\t\x12\x11\n\x04name\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06rename\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x07\n\x05_nameB\t\n\x07_rename\"8\n\x14\x41\x64\x64PlaceAliasRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\"\x17\n\x15\x41\x64\x64PlaceAliasResponse\";\n\x17\x44\x65letePlaceAliasRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\"\x1a\n\x18\x44\x65letePlaceAliasResponse\"\x8b\x01\n\x13SetPlaceTagsRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x34\n\x04tags\x18\x02 \x03(\x0b\x32&.labgrid.SetPlaceTagsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14SetPlaceTagsResponse\"<\n\x16SetPlaceCommentRequest\x12\x11\n\tplacename\x18\x01 
\x01(\t\x12\x0f\n\x07\x63omment\x18\x02 \x01(\t\"\x19\n\x17SetPlaceCommentResponse\"Z\n\x14\x41\x64\x64PlaceMatchRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\x13\n\x06rename\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_rename\"\x17\n\x15\x41\x64\x64PlaceMatchResponse\"]\n\x17\x44\x65letePlaceMatchRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\x13\n\x06rename\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_rename\"\x1a\n\x18\x44\x65letePlaceMatchResponse\"(\n\x13\x41\x63quirePlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\"\x16\n\x14\x41\x63quirePlaceResponse\"L\n\x13ReleasePlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x15\n\x08\x66romuser\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0b\n\t_fromuser\"\x16\n\x14ReleasePlaceResponse\"4\n\x11\x41llowPlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\"\x14\n\x12\x41llowPlaceResponse\"\xb6\x01\n\x18\x43reateReservationRequest\x12?\n\x07\x66ilters\x18\x01 \x03(\x0b\x32..labgrid.CreateReservationRequest.FiltersEntry\x12\x0c\n\x04prio\x18\x02 \x01(\x01\x1aK\n\x0c\x46iltersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.labgrid.Reservation.Filter:\x02\x38\x01\"F\n\x19\x43reateReservationResponse\x12)\n\x0breservation\x18\x01 \x01(\x0b\x32\x14.labgrid.Reservation\"\xcd\x03\n\x0bReservation\x12\r\n\x05owner\x18\x01 \x01(\t\x12\r\n\x05token\x18\x02 \x01(\t\x12\r\n\x05state\x18\x03 \x01(\x05\x12\x0c\n\x04prio\x18\x04 \x01(\x01\x12\x32\n\x07\x66ilters\x18\x05 \x03(\x0b\x32!.labgrid.Reservation.FiltersEntry\x12:\n\x0b\x61llocations\x18\x06 \x03(\x0b\x32%.labgrid.Reservation.AllocationsEntry\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x01\x12\x0f\n\x07timeout\x18\x08 \x01(\x01\x1ap\n\x06\x46ilter\x12\x37\n\x06\x66ilter\x18\x01 \x03(\x0b\x32\'.labgrid.Reservation.Filter.FilterEntry\x1a-\n\x0b\x46ilterEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1aK\n\x0c\x46iltersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.labgrid.Reservation.Filter:\x02\x38\x01\x1a\x32\n\x10\x41llocationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\")\n\x18\x43\x61ncelReservationRequest\x12\r\n\x05token\x18\x01 \x01(\t\"\x1b\n\x19\x43\x61ncelReservationResponse\"\'\n\x16PollReservationRequest\x12\r\n\x05token\x18\x01 \x01(\t\"D\n\x17PollReservationResponse\x12)\n\x0breservation\x18\x01 \x01(\x0b\x32\x14.labgrid.Reservation\"E\n\x17GetReservationsResponse\x12*\n\x0creservations\x18\x01 \x03(\x0b\x32\x14.labgrid.Reservation\"\x18\n\x16GetReservationsRequest2\xd2\x0b\n\x0b\x43oordinator\x12I\n\x0c\x43lientStream\x12\x18.labgrid.ClientInMessage\x1a\x19.labgrid.ClientOutMessage\"\x00(\x01\x30\x01\x12O\n\x0e\x45xporterStream\x12\x1a.labgrid.ExporterInMessage\x1a\x1b.labgrid.ExporterOutMessage\"\x00(\x01\x30\x01\x12\x41\n\x08\x41\x64\x64Place\x12\x18.labgrid.AddPlaceRequest\x1a\x19.labgrid.AddPlaceResponse\"\x00\x12J\n\x0b\x44\x65letePlace\x12\x1b.labgrid.DeletePlaceRequest\x1a\x1c.labgrid.DeletePlaceResponse\"\x00\x12\x44\n\tGetPlaces\x12\x19.labgrid.GetPlacesRequest\x1a\x1a.labgrid.GetPlacesResponse\"\x00\x12P\n\rAddPlaceAlias\x12\x1d.labgrid.AddPlaceAliasRequest\x1a\x1e.labgrid.AddPlaceAliasResponse\"\x00\x12Y\n\x10\x44\x65letePlaceAlias\x12 .labgrid.DeletePlaceAliasRequest\x1a!.labgrid.DeletePlaceAliasResponse\"\x00\x12M\n\x0cSetPlaceTags\x12\x1c.labgrid.SetPlaceTagsRequest\x1a\x1d.labgrid.SetPlaceTagsResponse\"\x00\x12V\n\x0fSetPlaceComment\x12\x1f.labgrid.SetPlaceCommentRequest\x1a .labgrid.SetPlaceCommentResponse\"\x00\x12P\n\rAddPlaceMatch\x12\x1d.labgrid.AddPlaceMatchRequest\x1a\x1e.labgrid.AddPlaceMatchResponse\"\x00\x12Y\n\x10\x44\x65letePlaceMatch\x12 
.labgrid.DeletePlaceMatchRequest\x1a!.labgrid.DeletePlaceMatchResponse\"\x00\x12M\n\x0c\x41\x63quirePlace\x12\x1c.labgrid.AcquirePlaceRequest\x1a\x1d.labgrid.AcquirePlaceResponse\"\x00\x12M\n\x0cReleasePlace\x12\x1c.labgrid.ReleasePlaceRequest\x1a\x1d.labgrid.ReleasePlaceResponse\"\x00\x12G\n\nAllowPlace\x12\x1a.labgrid.AllowPlaceRequest\x1a\x1b.labgrid.AllowPlaceResponse\"\x00\x12\\\n\x11\x43reateReservation\x12!.labgrid.CreateReservationRequest\x1a\".labgrid.CreateReservationResponse\"\x00\x12\\\n\x11\x43\x61ncelReservation\x12!.labgrid.CancelReservationRequest\x1a\".labgrid.CancelReservationResponse\"\x00\x12V\n\x0fPollReservation\x12\x1f.labgrid.PollReservationRequest\x1a .labgrid.PollReservationResponse\"\x00\x12V\n\x0fGetReservations\x12\x1f.labgrid.GetReservationsRequest\x1a .labgrid.GetReservationsResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'labgrid_coordinator_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_RESOURCE_PARAMSENTRY']._loaded_options = None + _globals['_RESOURCE_PARAMSENTRY']._serialized_options = b'8\001' + _globals['_RESOURCE_EXTRAENTRY']._loaded_options = None + _globals['_RESOURCE_EXTRAENTRY']._serialized_options = b'8\001' + _globals['_PLACE_TAGSENTRY']._loaded_options = None + _globals['_PLACE_TAGSENTRY']._serialized_options = b'8\001' + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._loaded_options = None + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_options = b'8\001' + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._loaded_options = None + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_FILTER_FILTERENTRY']._loaded_options = None + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_FILTERSENTRY']._loaded_options = None + 
_globals['_RESERVATION_FILTERSENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_ALLOCATIONSENTRY']._loaded_options = None + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_options = b'8\001' + _globals['_CLIENTINMESSAGE']._serialized_start=39 + _globals['_CLIENTINMESSAGE']._serialized_end=177 + _globals['_SYNC']._serialized_start=179 + _globals['_SYNC']._serialized_end=197 + _globals['_STARTUPDONE']._serialized_start=199 + _globals['_STARTUPDONE']._serialized_end=243 + _globals['_SUBSCRIBE']._serialized_start=245 + _globals['_SUBSCRIBE']._serialized_end=359 + _globals['_CLIENTOUTMESSAGE']._serialized_start=361 + _globals['_CLIENTOUTMESSAGE']._serialized_end=464 + _globals['_UPDATERESPONSE']._serialized_start=467 + _globals['_UPDATERESPONSE']._serialized_end=632 + _globals['_EXPORTERINMESSAGE']._serialized_start=635 + _globals['_EXPORTERINMESSAGE']._serialized_end=789 + _globals['_RESOURCE']._serialized_start=792 + _globals['_RESOURCE']._serialized_end=1206 + _globals['_RESOURCE_PATH']._serialized_start=980 + _globals['_RESOURCE_PATH']._serialized_end=1075 + _globals['_RESOURCE_PARAMSENTRY']._serialized_start=1077 + _globals['_RESOURCE_PARAMSENTRY']._serialized_end=1141 + _globals['_RESOURCE_EXTRAENTRY']._serialized_start=1143 + _globals['_RESOURCE_EXTRAENTRY']._serialized_end=1206 + _globals['_MAPVALUE']._serialized_start=1209 + _globals['_MAPVALUE']._serialized_end=1339 + _globals['_EXPORTERRESPONSE']._serialized_start=1341 + _globals['_EXPORTERRESPONSE']._serialized_end=1408 + _globals['_HELLO']._serialized_start=1410 + _globals['_HELLO']._serialized_end=1434 + _globals['_EXPORTEROUTMESSAGE']._serialized_start=1437 + _globals['_EXPORTEROUTMESSAGE']._serialized_end=1567 + _globals['_EXPORTERSETACQUIREDREQUEST']._serialized_start=1569 + _globals['_EXPORTERSETACQUIREDREQUEST']._serialized_end=1680 + _globals['_ADDPLACEREQUEST']._serialized_start=1682 + _globals['_ADDPLACEREQUEST']._serialized_end=1713 + 
_globals['_ADDPLACERESPONSE']._serialized_start=1715 + _globals['_ADDPLACERESPONSE']._serialized_end=1733 + _globals['_DELETEPLACEREQUEST']._serialized_start=1735 + _globals['_DELETEPLACEREQUEST']._serialized_end=1769 + _globals['_DELETEPLACERESPONSE']._serialized_start=1771 + _globals['_DELETEPLACERESPONSE']._serialized_end=1792 + _globals['_GETPLACESREQUEST']._serialized_start=1794 + _globals['_GETPLACESREQUEST']._serialized_end=1812 + _globals['_GETPLACESRESPONSE']._serialized_start=1814 + _globals['_GETPLACESRESPONSE']._serialized_end=1865 + _globals['_PLACE']._serialized_start=1868 + _globals['_PLACE']._serialized_end=2206 + _globals['_PLACE_TAGSENTRY']._serialized_start=2134 + _globals['_PLACE_TAGSENTRY']._serialized_end=2177 + _globals['_RESOURCEMATCH']._serialized_start=2208 + _globals['_RESOURCEMATCH']._serialized_end=2329 + _globals['_ADDPLACEALIASREQUEST']._serialized_start=2331 + _globals['_ADDPLACEALIASREQUEST']._serialized_end=2387 + _globals['_ADDPLACEALIASRESPONSE']._serialized_start=2389 + _globals['_ADDPLACEALIASRESPONSE']._serialized_end=2412 + _globals['_DELETEPLACEALIASREQUEST']._serialized_start=2414 + _globals['_DELETEPLACEALIASREQUEST']._serialized_end=2473 + _globals['_DELETEPLACEALIASRESPONSE']._serialized_start=2475 + _globals['_DELETEPLACEALIASRESPONSE']._serialized_end=2501 + _globals['_SETPLACETAGSREQUEST']._serialized_start=2504 + _globals['_SETPLACETAGSREQUEST']._serialized_end=2643 + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_start=2134 + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_end=2177 + _globals['_SETPLACETAGSRESPONSE']._serialized_start=2645 + _globals['_SETPLACETAGSRESPONSE']._serialized_end=2667 + _globals['_SETPLACECOMMENTREQUEST']._serialized_start=2669 + _globals['_SETPLACECOMMENTREQUEST']._serialized_end=2729 + _globals['_SETPLACECOMMENTRESPONSE']._serialized_start=2731 + _globals['_SETPLACECOMMENTRESPONSE']._serialized_end=2756 + _globals['_ADDPLACEMATCHREQUEST']._serialized_start=2758 + 
_globals['_ADDPLACEMATCHREQUEST']._serialized_end=2848 + _globals['_ADDPLACEMATCHRESPONSE']._serialized_start=2850 + _globals['_ADDPLACEMATCHRESPONSE']._serialized_end=2873 + _globals['_DELETEPLACEMATCHREQUEST']._serialized_start=2875 + _globals['_DELETEPLACEMATCHREQUEST']._serialized_end=2968 + _globals['_DELETEPLACEMATCHRESPONSE']._serialized_start=2970 + _globals['_DELETEPLACEMATCHRESPONSE']._serialized_end=2996 + _globals['_ACQUIREPLACEREQUEST']._serialized_start=2998 + _globals['_ACQUIREPLACEREQUEST']._serialized_end=3038 + _globals['_ACQUIREPLACERESPONSE']._serialized_start=3040 + _globals['_ACQUIREPLACERESPONSE']._serialized_end=3062 + _globals['_RELEASEPLACEREQUEST']._serialized_start=3064 + _globals['_RELEASEPLACEREQUEST']._serialized_end=3140 + _globals['_RELEASEPLACERESPONSE']._serialized_start=3142 + _globals['_RELEASEPLACERESPONSE']._serialized_end=3164 + _globals['_ALLOWPLACEREQUEST']._serialized_start=3166 + _globals['_ALLOWPLACEREQUEST']._serialized_end=3218 + _globals['_ALLOWPLACERESPONSE']._serialized_start=3220 + _globals['_ALLOWPLACERESPONSE']._serialized_end=3240 + _globals['_CREATERESERVATIONREQUEST']._serialized_start=3243 + _globals['_CREATERESERVATIONREQUEST']._serialized_end=3425 + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_start=3350 + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_end=3425 + _globals['_CREATERESERVATIONRESPONSE']._serialized_start=3427 + _globals['_CREATERESERVATIONRESPONSE']._serialized_end=3497 + _globals['_RESERVATION']._serialized_start=3500 + _globals['_RESERVATION']._serialized_end=3961 + _globals['_RESERVATION_FILTER']._serialized_start=3720 + _globals['_RESERVATION_FILTER']._serialized_end=3832 + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_start=3787 + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_end=3832 + _globals['_RESERVATION_FILTERSENTRY']._serialized_start=3350 + _globals['_RESERVATION_FILTERSENTRY']._serialized_end=3425 + 
_globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_start=3911 + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_end=3961 + _globals['_CANCELRESERVATIONREQUEST']._serialized_start=3963 + _globals['_CANCELRESERVATIONREQUEST']._serialized_end=4004 + _globals['_CANCELRESERVATIONRESPONSE']._serialized_start=4006 + _globals['_CANCELRESERVATIONRESPONSE']._serialized_end=4033 + _globals['_POLLRESERVATIONREQUEST']._serialized_start=4035 + _globals['_POLLRESERVATIONREQUEST']._serialized_end=4074 + _globals['_POLLRESERVATIONRESPONSE']._serialized_start=4076 + _globals['_POLLRESERVATIONRESPONSE']._serialized_end=4144 + _globals['_GETRESERVATIONSRESPONSE']._serialized_start=4146 + _globals['_GETRESERVATIONSRESPONSE']._serialized_end=4215 + _globals['_GETRESERVATIONSREQUEST']._serialized_start=4217 + _globals['_GETRESERVATIONSREQUEST']._serialized_end=4241 + _globals['_COORDINATOR']._serialized_start=4244 + _globals['_COORDINATOR']._serialized_end=5734 +# @@protoc_insertion_point(module_scope) diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2.pyi b/labgrid/remote/generated/labgrid_coordinator_pb2.pyi new file mode 100644 index 000000000..366f4e438 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2.pyi @@ -0,0 +1,448 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class ClientInMessage(_message.Message): + __slots__ = ("sync", "startup", "subscribe") + SYNC_FIELD_NUMBER: _ClassVar[int] + STARTUP_FIELD_NUMBER: _ClassVar[int] + SUBSCRIBE_FIELD_NUMBER: _ClassVar[int] + sync: Sync + startup: StartupDone + subscribe: Subscribe + def __init__(self, sync: _Optional[_Union[Sync, _Mapping]] = ..., startup: _Optional[_Union[StartupDone, _Mapping]] = ..., 
subscribe: _Optional[_Union[Subscribe, _Mapping]] = ...) -> None: ... + +class Sync(_message.Message): + __slots__ = ("id",) + ID_FIELD_NUMBER: _ClassVar[int] + id: int + def __init__(self, id: _Optional[int] = ...) -> None: ... + +class StartupDone(_message.Message): + __slots__ = ("version", "name") + VERSION_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + version: str + name: str + def __init__(self, version: _Optional[str] = ..., name: _Optional[str] = ...) -> None: ... + +class Subscribe(_message.Message): + __slots__ = ("is_unsubscribe", "all_places", "all_resources") + IS_UNSUBSCRIBE_FIELD_NUMBER: _ClassVar[int] + ALL_PLACES_FIELD_NUMBER: _ClassVar[int] + ALL_RESOURCES_FIELD_NUMBER: _ClassVar[int] + is_unsubscribe: bool + all_places: bool + all_resources: bool + def __init__(self, is_unsubscribe: bool = ..., all_places: bool = ..., all_resources: bool = ...) -> None: ... + +class ClientOutMessage(_message.Message): + __slots__ = ("sync", "updates") + SYNC_FIELD_NUMBER: _ClassVar[int] + UPDATES_FIELD_NUMBER: _ClassVar[int] + sync: Sync + updates: _containers.RepeatedCompositeFieldContainer[UpdateResponse] + def __init__(self, sync: _Optional[_Union[Sync, _Mapping]] = ..., updates: _Optional[_Iterable[_Union[UpdateResponse, _Mapping]]] = ...) -> None: ... + +class UpdateResponse(_message.Message): + __slots__ = ("resource", "del_resource", "place", "del_place") + RESOURCE_FIELD_NUMBER: _ClassVar[int] + DEL_RESOURCE_FIELD_NUMBER: _ClassVar[int] + PLACE_FIELD_NUMBER: _ClassVar[int] + DEL_PLACE_FIELD_NUMBER: _ClassVar[int] + resource: Resource + del_resource: Resource.Path + place: Place + del_place: str + def __init__(self, resource: _Optional[_Union[Resource, _Mapping]] = ..., del_resource: _Optional[_Union[Resource.Path, _Mapping]] = ..., place: _Optional[_Union[Place, _Mapping]] = ..., del_place: _Optional[str] = ...) -> None: ... 
+ +class ExporterInMessage(_message.Message): + __slots__ = ("resource", "startup", "response") + RESOURCE_FIELD_NUMBER: _ClassVar[int] + STARTUP_FIELD_NUMBER: _ClassVar[int] + RESPONSE_FIELD_NUMBER: _ClassVar[int] + resource: Resource + startup: StartupDone + response: ExporterResponse + def __init__(self, resource: _Optional[_Union[Resource, _Mapping]] = ..., startup: _Optional[_Union[StartupDone, _Mapping]] = ..., response: _Optional[_Union[ExporterResponse, _Mapping]] = ...) -> None: ... + +class Resource(_message.Message): + __slots__ = ("path", "cls", "params", "extra", "acquired", "avail") + class Path(_message.Message): + __slots__ = ("exporter_name", "group_name", "resource_name") + EXPORTER_NAME_FIELD_NUMBER: _ClassVar[int] + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + RESOURCE_NAME_FIELD_NUMBER: _ClassVar[int] + exporter_name: str + group_name: str + resource_name: str + def __init__(self, exporter_name: _Optional[str] = ..., group_name: _Optional[str] = ..., resource_name: _Optional[str] = ...) -> None: ... + class ParamsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: MapValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[MapValue, _Mapping]] = ...) -> None: ... + class ExtraEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: MapValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[MapValue, _Mapping]] = ...) -> None: ... 
+ PATH_FIELD_NUMBER: _ClassVar[int] + CLS_FIELD_NUMBER: _ClassVar[int] + PARAMS_FIELD_NUMBER: _ClassVar[int] + EXTRA_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_FIELD_NUMBER: _ClassVar[int] + AVAIL_FIELD_NUMBER: _ClassVar[int] + path: Resource.Path + cls: str + params: _containers.MessageMap[str, MapValue] + extra: _containers.MessageMap[str, MapValue] + acquired: str + avail: bool + def __init__(self, path: _Optional[_Union[Resource.Path, _Mapping]] = ..., cls: _Optional[str] = ..., params: _Optional[_Mapping[str, MapValue]] = ..., extra: _Optional[_Mapping[str, MapValue]] = ..., acquired: _Optional[str] = ..., avail: bool = ...) -> None: ... + +class MapValue(_message.Message): + __slots__ = ("bool_value", "int_value", "uint_value", "float_value", "string_value") + BOOL_VALUE_FIELD_NUMBER: _ClassVar[int] + INT_VALUE_FIELD_NUMBER: _ClassVar[int] + UINT_VALUE_FIELD_NUMBER: _ClassVar[int] + FLOAT_VALUE_FIELD_NUMBER: _ClassVar[int] + STRING_VALUE_FIELD_NUMBER: _ClassVar[int] + bool_value: bool + int_value: int + uint_value: int + float_value: float + string_value: str + def __init__(self, bool_value: bool = ..., int_value: _Optional[int] = ..., uint_value: _Optional[int] = ..., float_value: _Optional[float] = ..., string_value: _Optional[str] = ...) -> None: ... + +class ExporterResponse(_message.Message): + __slots__ = ("success", "reason") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + REASON_FIELD_NUMBER: _ClassVar[int] + success: bool + reason: str + def __init__(self, success: bool = ..., reason: _Optional[str] = ...) -> None: ... + +class Hello(_message.Message): + __slots__ = ("version",) + VERSION_FIELD_NUMBER: _ClassVar[int] + version: str + def __init__(self, version: _Optional[str] = ...) -> None: ... 
+ +class ExporterOutMessage(_message.Message): + __slots__ = ("hello", "set_acquired_request") + HELLO_FIELD_NUMBER: _ClassVar[int] + SET_ACQUIRED_REQUEST_FIELD_NUMBER: _ClassVar[int] + hello: Hello + set_acquired_request: ExporterSetAcquiredRequest + def __init__(self, hello: _Optional[_Union[Hello, _Mapping]] = ..., set_acquired_request: _Optional[_Union[ExporterSetAcquiredRequest, _Mapping]] = ...) -> None: ... + +class ExporterSetAcquiredRequest(_message.Message): + __slots__ = ("group_name", "resource_name", "place_name") + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + RESOURCE_NAME_FIELD_NUMBER: _ClassVar[int] + PLACE_NAME_FIELD_NUMBER: _ClassVar[int] + group_name: str + resource_name: str + place_name: str + def __init__(self, group_name: _Optional[str] = ..., resource_name: _Optional[str] = ..., place_name: _Optional[str] = ...) -> None: ... + +class AddPlaceRequest(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... + +class AddPlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceRequest(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... + +class DeletePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetPlacesRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetPlacesResponse(_message.Message): + __slots__ = ("places",) + PLACES_FIELD_NUMBER: _ClassVar[int] + places: _containers.RepeatedCompositeFieldContainer[Place] + def __init__(self, places: _Optional[_Iterable[_Union[Place, _Mapping]]] = ...) -> None: ... 
+ +class Place(_message.Message): + __slots__ = ("name", "aliases", "comment", "tags", "matches", "acquired", "acquired_resources", "allowed", "created", "changed", "reservation") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + NAME_FIELD_NUMBER: _ClassVar[int] + ALIASES_FIELD_NUMBER: _ClassVar[int] + COMMENT_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + MATCHES_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_RESOURCES_FIELD_NUMBER: _ClassVar[int] + ALLOWED_FIELD_NUMBER: _ClassVar[int] + CREATED_FIELD_NUMBER: _ClassVar[int] + CHANGED_FIELD_NUMBER: _ClassVar[int] + RESERVATION_FIELD_NUMBER: _ClassVar[int] + name: str + aliases: _containers.RepeatedScalarFieldContainer[str] + comment: str + tags: _containers.ScalarMap[str, str] + matches: _containers.RepeatedCompositeFieldContainer[ResourceMatch] + acquired: str + acquired_resources: _containers.RepeatedScalarFieldContainer[str] + allowed: _containers.RepeatedScalarFieldContainer[str] + created: float + changed: float + reservation: str + def __init__(self, name: _Optional[str] = ..., aliases: _Optional[_Iterable[str]] = ..., comment: _Optional[str] = ..., tags: _Optional[_Mapping[str, str]] = ..., matches: _Optional[_Iterable[_Union[ResourceMatch, _Mapping]]] = ..., acquired: _Optional[str] = ..., acquired_resources: _Optional[_Iterable[str]] = ..., allowed: _Optional[_Iterable[str]] = ..., created: _Optional[float] = ..., changed: _Optional[float] = ..., reservation: _Optional[str] = ...) -> None: ... 
+ +class ResourceMatch(_message.Message): + __slots__ = ("exporter", "group", "cls", "name", "rename") + EXPORTER_FIELD_NUMBER: _ClassVar[int] + GROUP_FIELD_NUMBER: _ClassVar[int] + CLS_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + exporter: str + group: str + cls: str + name: str + rename: str + def __init__(self, exporter: _Optional[str] = ..., group: _Optional[str] = ..., cls: _Optional[str] = ..., name: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class AddPlaceAliasRequest(_message.Message): + __slots__ = ("placename", "alias") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + ALIAS_FIELD_NUMBER: _ClassVar[int] + placename: str + alias: str + def __init__(self, placename: _Optional[str] = ..., alias: _Optional[str] = ...) -> None: ... + +class AddPlaceAliasResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceAliasRequest(_message.Message): + __slots__ = ("placename", "alias") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + ALIAS_FIELD_NUMBER: _ClassVar[int] + placename: str + alias: str + def __init__(self, placename: _Optional[str] = ..., alias: _Optional[str] = ...) -> None: ... + +class DeletePlaceAliasResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class SetPlaceTagsRequest(_message.Message): + __slots__ = ("placename", "tags") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + PLACENAME_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + placename: str + tags: _containers.ScalarMap[str, str] + def __init__(self, placename: _Optional[str] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... 
+ +class SetPlaceTagsResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class SetPlaceCommentRequest(_message.Message): + __slots__ = ("placename", "comment") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + COMMENT_FIELD_NUMBER: _ClassVar[int] + placename: str + comment: str + def __init__(self, placename: _Optional[str] = ..., comment: _Optional[str] = ...) -> None: ... + +class SetPlaceCommentResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AddPlaceMatchRequest(_message.Message): + __slots__ = ("placename", "pattern", "rename") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + PATTERN_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + pattern: str + rename: str + def __init__(self, placename: _Optional[str] = ..., pattern: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class AddPlaceMatchResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceMatchRequest(_message.Message): + __slots__ = ("placename", "pattern", "rename") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + PATTERN_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + pattern: str + rename: str + def __init__(self, placename: _Optional[str] = ..., pattern: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class DeletePlaceMatchResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AcquirePlaceRequest(_message.Message): + __slots__ = ("placename",) + PLACENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + def __init__(self, placename: _Optional[str] = ...) -> None: ... + +class AcquirePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... 
+ +class ReleasePlaceRequest(_message.Message): + __slots__ = ("placename", "fromuser") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + FROMUSER_FIELD_NUMBER: _ClassVar[int] + placename: str + fromuser: str + def __init__(self, placename: _Optional[str] = ..., fromuser: _Optional[str] = ...) -> None: ... + +class ReleasePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AllowPlaceRequest(_message.Message): + __slots__ = ("placename", "user") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + USER_FIELD_NUMBER: _ClassVar[int] + placename: str + user: str + def __init__(self, placename: _Optional[str] = ..., user: _Optional[str] = ...) -> None: ... + +class AllowPlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class CreateReservationRequest(_message.Message): + __slots__ = ("filters", "prio") + class FiltersEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: Reservation.Filter + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[Reservation.Filter, _Mapping]] = ...) -> None: ... + FILTERS_FIELD_NUMBER: _ClassVar[int] + PRIO_FIELD_NUMBER: _ClassVar[int] + filters: _containers.MessageMap[str, Reservation.Filter] + prio: float + def __init__(self, filters: _Optional[_Mapping[str, Reservation.Filter]] = ..., prio: _Optional[float] = ...) -> None: ... + +class CreateReservationResponse(_message.Message): + __slots__ = ("reservation",) + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... 
+ +class Reservation(_message.Message): + __slots__ = ("owner", "token", "state", "prio", "filters", "allocations", "created", "timeout") + class Filter(_message.Message): + __slots__ = ("filter",) + class FilterEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + FILTER_FIELD_NUMBER: _ClassVar[int] + filter: _containers.ScalarMap[str, str] + def __init__(self, filter: _Optional[_Mapping[str, str]] = ...) -> None: ... + class FiltersEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: Reservation.Filter + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[Reservation.Filter, _Mapping]] = ...) -> None: ... + class AllocationsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + OWNER_FIELD_NUMBER: _ClassVar[int] + TOKEN_FIELD_NUMBER: _ClassVar[int] + STATE_FIELD_NUMBER: _ClassVar[int] + PRIO_FIELD_NUMBER: _ClassVar[int] + FILTERS_FIELD_NUMBER: _ClassVar[int] + ALLOCATIONS_FIELD_NUMBER: _ClassVar[int] + CREATED_FIELD_NUMBER: _ClassVar[int] + TIMEOUT_FIELD_NUMBER: _ClassVar[int] + owner: str + token: str + state: int + prio: float + filters: _containers.MessageMap[str, Reservation.Filter] + allocations: _containers.ScalarMap[str, str] + created: float + timeout: float + def __init__(self, owner: _Optional[str] = ..., token: _Optional[str] = ..., state: _Optional[int] = ..., prio: _Optional[float] = ..., filters: _Optional[_Mapping[str, Reservation.Filter]] = ..., allocations: _Optional[_Mapping[str, str]] = ..., created: _Optional[float] = ..., timeout: _Optional[float] = ...) 
-> None: ... + +class CancelReservationRequest(_message.Message): + __slots__ = ("token",) + TOKEN_FIELD_NUMBER: _ClassVar[int] + token: str + def __init__(self, token: _Optional[str] = ...) -> None: ... + +class CancelReservationResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class PollReservationRequest(_message.Message): + __slots__ = ("token",) + TOKEN_FIELD_NUMBER: _ClassVar[int] + token: str + def __init__(self, token: _Optional[str] = ...) -> None: ... + +class PollReservationResponse(_message.Message): + __slots__ = ("reservation",) + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... + +class GetReservationsResponse(_message.Message): + __slots__ = ("reservations",) + RESERVATIONS_FIELD_NUMBER: _ClassVar[int] + reservations: _containers.RepeatedCompositeFieldContainer[Reservation] + def __init__(self, reservations: _Optional[_Iterable[_Union[Reservation, _Mapping]]] = ...) -> None: ... + +class GetReservationsRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py b/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py new file mode 100644 index 000000000..ddd612e1b --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py @@ -0,0 +1,833 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +from . 
import labgrid_coordinator_pb2 as labgrid__coordinator__pb2 + +GRPC_GENERATED_VERSION = '1.64.1' +GRPC_VERSION = grpc.__version__ +EXPECTED_ERROR_RELEASE = '1.65.0' +SCHEDULED_RELEASE_DATE = 'June 25, 2024' +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + warnings.warn( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in labgrid_coordinator_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + + f' This warning will become an error in {EXPECTED_ERROR_RELEASE},' + + f' scheduled for release on {SCHEDULED_RELEASE_DATE}.', + RuntimeWarning + ) + + +class CoordinatorStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ClientStream = channel.stream_stream( + '/labgrid.Coordinator/ClientStream', + request_serializer=labgrid__coordinator__pb2.ClientInMessage.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ClientOutMessage.FromString, + _registered_method=True) + self.ExporterStream = channel.stream_stream( + '/labgrid.Coordinator/ExporterStream', + request_serializer=labgrid__coordinator__pb2.ExporterInMessage.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ExporterOutMessage.FromString, + _registered_method=True) + self.AddPlace = channel.unary_unary( + '/labgrid.Coordinator/AddPlace', + request_serializer=labgrid__coordinator__pb2.AddPlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceResponse.FromString, + _registered_method=True) + self.DeletePlace = channel.unary_unary( + '/labgrid.Coordinator/DeletePlace', + request_serializer=labgrid__coordinator__pb2.DeletePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceResponse.FromString, + _registered_method=True) + self.GetPlaces = channel.unary_unary( + '/labgrid.Coordinator/GetPlaces', + request_serializer=labgrid__coordinator__pb2.GetPlacesRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.GetPlacesResponse.FromString, + _registered_method=True) + self.AddPlaceAlias = channel.unary_unary( + '/labgrid.Coordinator/AddPlaceAlias', + request_serializer=labgrid__coordinator__pb2.AddPlaceAliasRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceAliasResponse.FromString, + _registered_method=True) + self.DeletePlaceAlias = channel.unary_unary( + '/labgrid.Coordinator/DeletePlaceAlias', + request_serializer=labgrid__coordinator__pb2.DeletePlaceAliasRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceAliasResponse.FromString, + _registered_method=True) + self.SetPlaceTags = channel.unary_unary( + 
'/labgrid.Coordinator/SetPlaceTags', + request_serializer=labgrid__coordinator__pb2.SetPlaceTagsRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.SetPlaceTagsResponse.FromString, + _registered_method=True) + self.SetPlaceComment = channel.unary_unary( + '/labgrid.Coordinator/SetPlaceComment', + request_serializer=labgrid__coordinator__pb2.SetPlaceCommentRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.SetPlaceCommentResponse.FromString, + _registered_method=True) + self.AddPlaceMatch = channel.unary_unary( + '/labgrid.Coordinator/AddPlaceMatch', + request_serializer=labgrid__coordinator__pb2.AddPlaceMatchRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceMatchResponse.FromString, + _registered_method=True) + self.DeletePlaceMatch = channel.unary_unary( + '/labgrid.Coordinator/DeletePlaceMatch', + request_serializer=labgrid__coordinator__pb2.DeletePlaceMatchRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceMatchResponse.FromString, + _registered_method=True) + self.AcquirePlace = channel.unary_unary( + '/labgrid.Coordinator/AcquirePlace', + request_serializer=labgrid__coordinator__pb2.AcquirePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AcquirePlaceResponse.FromString, + _registered_method=True) + self.ReleasePlace = channel.unary_unary( + '/labgrid.Coordinator/ReleasePlace', + request_serializer=labgrid__coordinator__pb2.ReleasePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ReleasePlaceResponse.FromString, + _registered_method=True) + self.AllowPlace = channel.unary_unary( + '/labgrid.Coordinator/AllowPlace', + request_serializer=labgrid__coordinator__pb2.AllowPlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AllowPlaceResponse.FromString, + _registered_method=True) + self.CreateReservation = channel.unary_unary( + 
'/labgrid.Coordinator/CreateReservation', + request_serializer=labgrid__coordinator__pb2.CreateReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.CreateReservationResponse.FromString, + _registered_method=True) + self.CancelReservation = channel.unary_unary( + '/labgrid.Coordinator/CancelReservation', + request_serializer=labgrid__coordinator__pb2.CancelReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.CancelReservationResponse.FromString, + _registered_method=True) + self.PollReservation = channel.unary_unary( + '/labgrid.Coordinator/PollReservation', + request_serializer=labgrid__coordinator__pb2.PollReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.PollReservationResponse.FromString, + _registered_method=True) + self.GetReservations = channel.unary_unary( + '/labgrid.Coordinator/GetReservations', + request_serializer=labgrid__coordinator__pb2.GetReservationsRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.GetReservationsResponse.FromString, + _registered_method=True) + + +class CoordinatorServicer(object): + """Missing associated documentation comment in .proto file.""" + + def ClientStream(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ExporterStream(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise 
NotImplementedError('Method not implemented!') + + def DeletePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetPlaces(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlaceAlias(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlaceAlias(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetPlaceTags(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetPlaceComment(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlaceMatch(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlaceMatch(self, request, context): + """Missing associated documentation 
comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AcquirePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReleasePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AllowPlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CancelReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PollReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetReservations(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + 
raise NotImplementedError('Method not implemented!') + + +def add_CoordinatorServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ClientStream': grpc.stream_stream_rpc_method_handler( + servicer.ClientStream, + request_deserializer=labgrid__coordinator__pb2.ClientInMessage.FromString, + response_serializer=labgrid__coordinator__pb2.ClientOutMessage.SerializeToString, + ), + 'ExporterStream': grpc.stream_stream_rpc_method_handler( + servicer.ExporterStream, + request_deserializer=labgrid__coordinator__pb2.ExporterInMessage.FromString, + response_serializer=labgrid__coordinator__pb2.ExporterOutMessage.SerializeToString, + ), + 'AddPlace': grpc.unary_unary_rpc_method_handler( + servicer.AddPlace, + request_deserializer=labgrid__coordinator__pb2.AddPlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceResponse.SerializeToString, + ), + 'DeletePlace': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlace, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceResponse.SerializeToString, + ), + 'GetPlaces': grpc.unary_unary_rpc_method_handler( + servicer.GetPlaces, + request_deserializer=labgrid__coordinator__pb2.GetPlacesRequest.FromString, + response_serializer=labgrid__coordinator__pb2.GetPlacesResponse.SerializeToString, + ), + 'AddPlaceAlias': grpc.unary_unary_rpc_method_handler( + servicer.AddPlaceAlias, + request_deserializer=labgrid__coordinator__pb2.AddPlaceAliasRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceAliasResponse.SerializeToString, + ), + 'DeletePlaceAlias': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlaceAlias, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceAliasRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceAliasResponse.SerializeToString, + ), + 'SetPlaceTags': grpc.unary_unary_rpc_method_handler( + servicer.SetPlaceTags, + 
request_deserializer=labgrid__coordinator__pb2.SetPlaceTagsRequest.FromString, + response_serializer=labgrid__coordinator__pb2.SetPlaceTagsResponse.SerializeToString, + ), + 'SetPlaceComment': grpc.unary_unary_rpc_method_handler( + servicer.SetPlaceComment, + request_deserializer=labgrid__coordinator__pb2.SetPlaceCommentRequest.FromString, + response_serializer=labgrid__coordinator__pb2.SetPlaceCommentResponse.SerializeToString, + ), + 'AddPlaceMatch': grpc.unary_unary_rpc_method_handler( + servicer.AddPlaceMatch, + request_deserializer=labgrid__coordinator__pb2.AddPlaceMatchRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceMatchResponse.SerializeToString, + ), + 'DeletePlaceMatch': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlaceMatch, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceMatchRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceMatchResponse.SerializeToString, + ), + 'AcquirePlace': grpc.unary_unary_rpc_method_handler( + servicer.AcquirePlace, + request_deserializer=labgrid__coordinator__pb2.AcquirePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AcquirePlaceResponse.SerializeToString, + ), + 'ReleasePlace': grpc.unary_unary_rpc_method_handler( + servicer.ReleasePlace, + request_deserializer=labgrid__coordinator__pb2.ReleasePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.ReleasePlaceResponse.SerializeToString, + ), + 'AllowPlace': grpc.unary_unary_rpc_method_handler( + servicer.AllowPlace, + request_deserializer=labgrid__coordinator__pb2.AllowPlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AllowPlaceResponse.SerializeToString, + ), + 'CreateReservation': grpc.unary_unary_rpc_method_handler( + servicer.CreateReservation, + request_deserializer=labgrid__coordinator__pb2.CreateReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.CreateReservationResponse.SerializeToString, + ), + 
'CancelReservation': grpc.unary_unary_rpc_method_handler( + servicer.CancelReservation, + request_deserializer=labgrid__coordinator__pb2.CancelReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.CancelReservationResponse.SerializeToString, + ), + 'PollReservation': grpc.unary_unary_rpc_method_handler( + servicer.PollReservation, + request_deserializer=labgrid__coordinator__pb2.PollReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.PollReservationResponse.SerializeToString, + ), + 'GetReservations': grpc.unary_unary_rpc_method_handler( + servicer.GetReservations, + request_deserializer=labgrid__coordinator__pb2.GetReservationsRequest.FromString, + response_serializer=labgrid__coordinator__pb2.GetReservationsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'labgrid.Coordinator', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('labgrid.Coordinator', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. 
+class Coordinator(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def ClientStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/labgrid.Coordinator/ClientStream', + labgrid__coordinator__pb2.ClientInMessage.SerializeToString, + labgrid__coordinator__pb2.ClientOutMessage.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ExporterStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/labgrid.Coordinator/ExporterStream', + labgrid__coordinator__pb2.ExporterInMessage.SerializeToString, + labgrid__coordinator__pb2.ExporterOutMessage.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AddPlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/AddPlace', + labgrid__coordinator__pb2.AddPlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def DeletePlace(request, + target, + options=(), + channel_credentials=None, + 
call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/DeletePlace', + labgrid__coordinator__pb2.DeletePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetPlaces(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/GetPlaces', + labgrid__coordinator__pb2.GetPlacesRequest.SerializeToString, + labgrid__coordinator__pb2.GetPlacesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AddPlaceAlias(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/AddPlaceAlias', + labgrid__coordinator__pb2.AddPlaceAliasRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceAliasResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def DeletePlaceAlias(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/DeletePlaceAlias', 
+ labgrid__coordinator__pb2.DeletePlaceAliasRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceAliasResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetPlaceTags(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/SetPlaceTags', + labgrid__coordinator__pb2.SetPlaceTagsRequest.SerializeToString, + labgrid__coordinator__pb2.SetPlaceTagsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SetPlaceComment(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/SetPlaceComment', + labgrid__coordinator__pb2.SetPlaceCommentRequest.SerializeToString, + labgrid__coordinator__pb2.SetPlaceCommentResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AddPlaceMatch(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/AddPlaceMatch', + labgrid__coordinator__pb2.AddPlaceMatchRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceMatchResponse.FromString, + options, + channel_credentials, + insecure, + 
call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def DeletePlaceMatch(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/DeletePlaceMatch', + labgrid__coordinator__pb2.DeletePlaceMatchRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceMatchResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AcquirePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/AcquirePlace', + labgrid__coordinator__pb2.AcquirePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AcquirePlaceResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReleasePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/ReleasePlace', + labgrid__coordinator__pb2.ReleasePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.ReleasePlaceResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AllowPlace(request, + target, + options=(), + channel_credentials=None, + 
call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/AllowPlace', + labgrid__coordinator__pb2.AllowPlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AllowPlaceResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CreateReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/CreateReservation', + labgrid__coordinator__pb2.CreateReservationRequest.SerializeToString, + labgrid__coordinator__pb2.CreateReservationResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CancelReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/CancelReservation', + labgrid__coordinator__pb2.CancelReservationRequest.SerializeToString, + labgrid__coordinator__pb2.CancelReservationResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def PollReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + 
target, + '/labgrid.Coordinator/PollReservation', + labgrid__coordinator__pb2.PollReservationRequest.SerializeToString, + labgrid__coordinator__pb2.PollReservationResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetReservations(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/labgrid.Coordinator/GetReservations', + labgrid__coordinator__pb2.GetReservationsRequest.SerializeToString, + labgrid__coordinator__pb2.GetReservationsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/labgrid/remote/proto/labgrid-coordinator.proto b/labgrid/remote/proto/labgrid-coordinator.proto new file mode 100644 index 000000000..e0585f7e1 --- /dev/null +++ b/labgrid/remote/proto/labgrid-coordinator.proto @@ -0,0 +1,297 @@ +syntax = "proto3"; + +package labgrid; + +service Coordinator { + rpc ClientStream(stream ClientInMessage) returns (stream ClientOutMessage) {} + + rpc ExporterStream(stream ExporterInMessage) returns (stream ExporterOutMessage) {} + + rpc AddPlace(AddPlaceRequest) returns (AddPlaceResponse) {} + + rpc DeletePlace(DeletePlaceRequest) returns (DeletePlaceResponse) {} + + rpc GetPlaces(GetPlacesRequest) returns (GetPlacesResponse) {} + + rpc AddPlaceAlias(AddPlaceAliasRequest) returns (AddPlaceAliasResponse) {} + + rpc DeletePlaceAlias(DeletePlaceAliasRequest) returns (DeletePlaceAliasResponse) {} + + rpc SetPlaceTags(SetPlaceTagsRequest) returns (SetPlaceTagsResponse) {} + + rpc SetPlaceComment(SetPlaceCommentRequest) returns (SetPlaceCommentResponse) {} + + rpc AddPlaceMatch(AddPlaceMatchRequest) returns 
(AddPlaceMatchResponse) {} + + rpc DeletePlaceMatch(DeletePlaceMatchRequest) returns (DeletePlaceMatchResponse) {} + + rpc AcquirePlace(AcquirePlaceRequest) returns (AcquirePlaceResponse) {} + + rpc ReleasePlace(ReleasePlaceRequest) returns (ReleasePlaceResponse) {} + + rpc AllowPlace(AllowPlaceRequest) returns (AllowPlaceResponse) {} + + rpc CreateReservation(CreateReservationRequest) returns (CreateReservationResponse) {} + + rpc CancelReservation(CancelReservationRequest) returns (CancelReservationResponse) {} + + rpc PollReservation(PollReservationRequest) returns (PollReservationResponse) {} + + rpc GetReservations(GetReservationsRequest) returns (GetReservationsResponse) {} +} + +message ClientInMessage { + oneof kind { + Sync sync = 1; + StartupDone startup = 2; + Subscribe subscribe = 3; + }; +}; + +message Sync { + uint64 id = 1; +}; + +message StartupDone { + string version = 1; + string name = 2; +}; + +message Subscribe { + optional bool is_unsubscribe = 1; + oneof kind { + bool all_places = 2; + bool all_resources = 3; + } +}; + +message ClientOutMessage { + optional Sync sync = 1; + repeated UpdateResponse updates = 2; +}; + +message UpdateResponse { + oneof kind { + Resource resource = 1; + Resource.Path del_resource = 2; + Place place = 3; + string del_place = 4; + }; +}; + +message ExporterInMessage { + oneof kind { + Resource resource = 1; + StartupDone startup = 2; + ExporterResponse response = 3; + }; +}; + +message Resource { + message Path { + optional string exporter_name = 1; + string group_name = 2; + string resource_name = 3; + } + Path path = 1; + string cls = 2; + map<string, MapValue> params = 3; + map<string, MapValue> extra = 4; + string acquired = 5; + bool avail = 6; +}; + +message MapValue { + oneof kind { + bool bool_value = 1; + int64 int_value = 2; + uint64 uint_value = 3; + double float_value = 4; + string string_value = 5; + // FIXME do we need arrays? 
+ } +}; + +message ExporterResponse { + bool success = 1; + optional string reason = 2; +}; + +message Hello { + string version = 1; +} + +message ExporterOutMessage { + oneof kind { + Hello hello = 1; + ExporterSetAcquiredRequest set_acquired_request = 2; + }; +}; + +message ExporterSetAcquiredRequest { + string group_name = 1; + string resource_name = 2; + optional string place_name = 3; +}; + +message AddPlaceRequest { + string name = 1; +}; + +message AddPlaceResponse { +}; + +message DeletePlaceRequest { + string name = 1; +}; + +message DeletePlaceResponse { +}; + +message GetPlacesRequest { +}; + +message GetPlacesResponse { + repeated Place places = 1; +} + +message Place { + string name = 1; + repeated string aliases = 2; + string comment = 3; + map<string, string> tags = 4; + repeated ResourceMatch matches = 5; + optional string acquired = 6; + repeated string acquired_resources = 7; + repeated string allowed = 8; + double created = 9; + double changed = 10; + optional string reservation = 11; +}; + +message ResourceMatch { + string exporter = 1; + string group = 2; + string cls = 3; + optional string name = 4; + optional string rename = 5; +}; + +message AddPlaceAliasRequest { + string placename = 1; + string alias = 2; +}; + +message AddPlaceAliasResponse { +}; + +message DeletePlaceAliasRequest { + string placename = 1; + string alias = 2; +}; + +message DeletePlaceAliasResponse { +}; + +message SetPlaceTagsRequest { + string placename = 1; + map<string, string> tags = 2; +}; + +message SetPlaceTagsResponse { +}; + +message SetPlaceCommentRequest { + string placename = 1; + string comment = 2; +}; + +message SetPlaceCommentResponse { +}; + +message AddPlaceMatchRequest { + string placename = 1; + string pattern = 2; + optional string rename = 3; +}; + +message AddPlaceMatchResponse { +}; + +message DeletePlaceMatchRequest { + string placename = 1; + string pattern = 2; + optional string rename = 3; +}; + +message DeletePlaceMatchResponse { +}; + +message AcquirePlaceRequest { + 
string placename = 1; +}; + +message AcquirePlaceResponse { +}; + +message ReleasePlaceRequest { + string placename = 1; + optional string fromuser = 2; +}; + +message ReleasePlaceResponse { +}; + +message AllowPlaceRequest { + string placename = 1; + string user = 2; +}; + +message AllowPlaceResponse { +}; + + +message CreateReservationRequest { + map<string, Reservation.Filter> filters = 1; + double prio = 2; +}; + +message CreateReservationResponse { + Reservation reservation = 1; +}; + +message Reservation { + message Filter { + map<string, string> filter = 1; + } + string owner = 1; + string token = 2; + int32 state = 3; + double prio = 4; + map<string, Filter> filters = 5; + map<string, string> allocations = 6; + double created = 7; + double timeout = 8; +}; + +message CancelReservationRequest { + string token = 1; +}; + +message CancelReservationResponse { +}; + +message PollReservationRequest { + string token = 1; +}; + +message PollReservationResponse { + Reservation reservation = 1; +}; + +message GetReservationsResponse { + repeated Reservation reservations = 1; +}; + +message GetReservationsRequest { +}; diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py index ad116382d..8b9bcc189 100644 --- a/labgrid/resource/remote.py +++ b/labgrid/resource/remote.py @@ -11,7 +11,6 @@ class RemotePlaceManager(ResourceManager): def __attrs_post_init__(self): super().__attrs_post_init__() self.url = None - self.realm = None self.loop = None self.session = None self.ready = None @@ -23,7 +22,7 @@ def _start(self): from ..remote.client import start_session try: - self.session = start_session(self.url, self.realm, {'env': self.env}) + self.session = start_session(self.url, {'env': self.env}) except ConnectionRefusedError as e: raise ConnectionRefusedError(f"Could not connect to coordinator {self.url}") \ from e @@ -39,12 +38,10 @@ def on_resource_added(self, resource): # be the same). 
if not self.session: self.env = remote_place.target.env - self.url = os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws") - self.realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + self.url = os.environ.get("LG_CROSSBAR", "127.0.0.1:20408") if self.env: config = self.env.config self.url = config.get_option('crossbar_url', self.url) - self.realm = config.get_option('crossbar_realm', self.realm) self._start() place = self.session.get_place(remote_place.name) # pylint: disable=no-member resource_entries = self.session.get_target_resources(place) # pylint: disable=no-member diff --git a/labgrid/util/proxy.py b/labgrid/util/proxy.py index a5740e8fd..8c301bc78 100644 --- a/labgrid/util/proxy.py +++ b/labgrid/util/proxy.py @@ -95,6 +95,15 @@ def get_url(cls, url, *, default_port=None): return urlunsplit(s) + + @classmethod + def get_grpc_url(cls, url, *, default_port=None): + url = f"//{url}" + url = proxymanager.get_url(url, default_port=default_port) + url = url.lstrip("/") + return url + + @classmethod def get_command(cls, res, host, port, ifname=None): """get argument list to start a proxy process connected to the target""" diff --git a/pyproject.toml b/pyproject.toml index d84b7b571..58f42d08b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,9 @@ classifiers = [ dependencies = [ "ansicolors>=1.1.8", "attrs>=21.4.0", - "autobahn>=21.3.1", + "grpcio==1.64.1", + "grpcio-reflection==1.64.1", + "protobuf>=5.27.0", "jinja2>=3.0.2", "pexpect>=4.8.0", "pyserial-labgrid>=3.4.0.1", @@ -118,11 +120,18 @@ dev = [ # additional dev dependencies "psutil>=5.8.0", + "pytest-benchmark>=4.0.0", "pytest-cov>=3.0.0", "pytest-dependency>=0.5.1", "pytest-isort>=2.0.0", "pytest-mock>=3.6.1", "pylint>=3.0.0", + + # GRPC Protobuf compiler + "grpcio-tools==1.64.1", + + # GRPC Channelz support + "grpcio-channelz==1.64.1", ] [project.scripts] @@ -130,6 +139,7 @@ labgrid-autoinstall = "labgrid.autoinstall.main:main" labgrid-client = "labgrid.remote.client:main" 
labgrid-exporter = "labgrid.remote.exporter:main" labgrid-suggest = "labgrid.resource.suggest:main" +labgrid-coordinator = "labgrid.remote.coordinator:main" # the following makes a plugin available to pytest [project.entry-points.pytest11] @@ -211,6 +221,7 @@ enable = [ generated-members = [ "labgrid.driver.*", "labgrid.strategy.*", + "labgrid_coordinator_pb2", ] signature-mutators = ["labgrid.step.step"] diff --git a/tests/conftest.py b/tests/conftest.py index 50bcad1a0..dc9c1707a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -100,56 +100,24 @@ def serial_driver_no_name(target, serial_port, mocker): return s @pytest.fixture(scope='function') -def crossbar_config(tmpdir, pytestconfig): - crossbar_config = '.crossbar/config-anonymous.yaml' - - pytestconfig.rootdir.join(crossbar_config).copy(tmpdir.mkdir('.crossbar')) - crossbar_config = tmpdir.join(crossbar_config) - - # crossbar runs labgrid's coordinator component as a guest, record its coverage - if pytestconfig.pluginmanager.get_plugin('pytest_cov'): - with open(crossbar_config, 'r+') as stream: - conf = yaml.safe_load(stream) - - for worker in conf['workers']: - if worker['type'] == 'guest': - worker['executable'] = 'coverage' - worker['arguments'].insert(0, 'run') - worker['arguments'].insert(1, '--parallel-mode') - # pytest-cov combines coverage files in root dir automatically, so copy it there - coverage_data = pytestconfig.rootdir.join('.coverage') - worker['arguments'].insert(2, f'--data-file={coverage_data}') - - stream.seek(0) - yaml.safe_dump(conf, stream) - - return crossbar_config - -@pytest.fixture(scope='function') -def crossbar(tmpdir, pytestconfig, crossbar_config): - crossbar_venv = Path(pytestconfig.getoption("--crossbar-venv")) - if not crossbar_venv.is_absolute(): - crossbar_venv = pytestconfig.rootdir / crossbar_venv - crossbar_bin = crossbar_venv / "bin/crossbar" +def coordinator(tmpdir): spawn = pexpect.spawn( - f'{crossbar_bin} start --color false --logformat none --config 
{crossbar_config}', - logfile=Prefixer(sys.stdout.buffer, 'crossbar'), + f'labgrid-coordinator', + logfile=Prefixer(sys.stdout.buffer, 'coordinator'), cwd=str(tmpdir)) try: - spawn.expect('Realm .* started') - spawn.expect('Guest .* started') spawn.expect('Coordinator ready') except: - print(f"crossbar startup failed with {spawn.before}") + print(f"coordinator startup failed with {spawn.before}") raise - reader = threading.Thread(target=keep_reading, name='crossbar-reader', args=(spawn,), daemon=True) + reader = threading.Thread(target=keep_reading, name='coordinator-reader', args=(spawn,), daemon=True) reader.start() yield spawn # let coverage write its data: # https://coverage.readthedocs.io/en/latest/subprocess.html#process-termination - print("stopping crossbar") + print("stopping coordinator") spawn.kill(SIGTERM) spawn.expect(pexpect.EOF) spawn.wait() @@ -157,7 +125,7 @@ def crossbar(tmpdir, pytestconfig, crossbar_config): reader.join() @pytest.fixture(scope='function') -def exporter(tmpdir, crossbar): +def exporter(tmpdir, coordinator): p = tmpdir.join("exports.yaml") p.write( """ @@ -182,7 +150,7 @@ def exporter(tmpdir, crossbar): logfile=Prefixer(sys.stdout.buffer, 'exporter'), cwd=str(tmpdir)) try: - spawn.expect('exporter/testhost') + spawn.expect('exporter name: testhost') except: print(f"exporter startup failed with {spawn.before}") raise @@ -201,8 +169,6 @@ def pytest_addoption(parser): help="Run SSHManager tests against localhost") parser.addoption("--ssh-username", default=None, help="SSH username to use for SSHDriver testing") - parser.addoption("--crossbar-venv", default=None, - help="Path to separate virtualenv with crossbar installed") def pytest_configure(config): # register an additional marker @@ -213,7 +179,7 @@ def pytest_configure(config): config.addinivalue_line("markers", "sshusername: test SSHDriver against Localhost") config.addinivalue_line("markers", - "crossbar: test against local crossbar") + "coordinator: test against local 
coordinator") def pytest_runtest_setup(item): envmarker = item.get_closest_marker("sigrokusb") @@ -228,7 +194,3 @@ def pytest_runtest_setup(item): if envmarker is not None: if item.config.getoption("--ssh-username") is None: pytest.skip("SSHDriver tests against localhost not enabled (enable with --ssh-username )") - envmarker = item.get_closest_marker("crossbar") - if envmarker is not None: - if item.config.getoption("--crossbar-venv") is None: - pytest.skip("No path to crossbar virtualenv given (set with --crossbar-venv )") diff --git a/tests/test_crossbar.py b/tests/test_client.py similarity index 87% rename from tests/test_crossbar.py rename to tests/test_client.py index a1db0eeeb..3f2f15aae 100644 --- a/tests/test_crossbar.py +++ b/tests/test_client.py @@ -8,8 +8,6 @@ psutil = pytest.importorskip("psutil") -pytestmark = pytest.mark.crossbar - def suspend_tree(pid): main = psutil.Process(pid) main.suspend() @@ -22,12 +20,11 @@ def resume_tree(pid): for child in main.children(recursive=True): child.resume() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_startup(crossbar): +def test_startup(coordinator): pass @pytest.fixture(scope='function') -def place(crossbar): +def place(coordinator): with pexpect.spawn('python -m labgrid.remote.client -p test create') as spawn: spawn.expect(pexpect.EOF) spawn.close() @@ -65,26 +62,24 @@ def place_acquire(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() def test_connect_error(): - with pexpect.spawn('python -m labgrid.remote.client -x ws://127.0.0.1:20409/ws places') as spawn: + with pexpect.spawn('python -m labgrid.remote.client -x 127.0.0.1:20409 places') as spawn: spawn.expect("Could not connect to coordinator") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def 
test_connect_timeout(crossbar): - suspend_tree(crossbar.pid) +def test_connect_timeout(coordinator): + suspend_tree(coordinator.pid) try: with pexpect.spawn('python -m labgrid.remote.client places') as spawn: - spawn.expect("connection closed during setup") + spawn.expect("connection attempt timed out before receiving SETTINGS frame") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() finally: - resume_tree(crossbar.pid) + resume_tree(coordinator.pid) pass -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_show(place): with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: spawn.expect("Place 'test':") @@ -92,7 +87,6 @@ def test_place_show(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_alias(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-alias foo') as spawn: spawn.expect(pexpect.EOF) @@ -104,7 +98,6 @@ def test_place_alias(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_comment(place): with pexpect.spawn('python -m labgrid.remote.client -p test set-comment my comment') as spawn: spawn.expect(pexpect.EOF) @@ -118,7 +111,6 @@ def test_place_comment(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_match(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "e1/g1/r1" "e2/g2/*"') as spawn: spawn.expect(pexpect.EOF) @@ -137,7 +129,6 @@ def test_place_match(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() 
-@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_match_duplicates(place): # first given match should succeed, second should be skipped matches = ( @@ -158,7 +149,6 @@ def test_place_match_duplicates(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire(place): with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: spawn.expect(pexpect.EOF) @@ -176,7 +166,6 @@ def test_place_acquire(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire_enforce(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match does/not/exist') as spawn: spawn.expect(pexpect.EOF) @@ -200,7 +189,6 @@ def test_place_acquire_enforce(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire_broken(place, exporter): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "*/Broken/*"') as spawn: spawn.expect(pexpect.EOF) @@ -208,7 +196,7 @@ def test_place_acquire_broken(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: - spawn.expect('failed to acquire place test') + spawn.expect('Failed to acquire resources for place test') spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() @@ -220,7 +208,6 @@ def test_place_acquire_broken(place, exporter): print(spawn.before.decode()) assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), 
reason="latest crossbar release incompatible with python3.12+") def test_place_release_from(monkeypatch, place, exporter): user = "test-user" host = "test-host" @@ -267,23 +254,20 @@ def test_place_release_from(monkeypatch, place, exporter): before = spawn.before.decode("utf-8").strip() assert user not in before and not host in before, before -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_place_add_no_name(crossbar): +def test_place_add_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client create') as spawn: spawn.expect("missing place name") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_place_del_no_name(crossbar): +def test_place_del_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client delete') as spawn: - spawn.expect("deletes require an exact place name") + spawn.expect("name was not a string") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target(place_acquire, tmpdir): from labgrid.environment import Environment p = tmpdir.join("config.yaml") @@ -304,7 +288,6 @@ def test_remoteplace_target(place_acquire, tmpdir): remote_place = t.get_resource("RemotePlace") assert remote_place.tags == {"board": "bar"} -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target_without_env(request, place_acquire): from labgrid import Target from labgrid.resource import RemotePlace @@ -313,7 +296,6 @@ def test_remoteplace_target_without_env(request, place_acquire): remote_place = RemotePlace(t, name="test") assert remote_place.tags == {"board": 
"bar"} -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_resource_conflict(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test2 create') as spawn: spawn.expect(pexpect.EOF) @@ -335,7 +317,6 @@ def test_resource_conflict(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client reserve --shell board=bar name=test') as spawn: spawn.expect(pexpect.EOF) @@ -413,7 +394,6 @@ def test_reservation(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_exporter_timeout(place, exporter): with pexpect.spawn('python -m labgrid.remote.client resources') as spawn: spawn.expect(pexpect.EOF) @@ -451,7 +431,6 @@ def test_exporter_timeout(place, exporter): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation_custom_config(place, exporter, tmpdir): p = tmpdir.join("config.yaml") p.write( @@ -489,7 +468,6 @@ def test_reservation_custom_config(place, exporter, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_same_name_resources(place, exporter, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test add-named-match "testhost/Many/NetworkService" "samename"') as spawn: spawn.expect(pexpect.EOF) diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py new file mode 100644 index 
000000000..acb84baba --- /dev/null +++ b/tests/test_coordinator.py @@ -0,0 +1,172 @@ + +import os +import re +import sys +import time + +import pytest +import pexpect + +import grpc +import labgrid.remote.generated.labgrid_coordinator_pb2_grpc as labgrid_coordinator_pb2_grpc +import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 + +psutil = pytest.importorskip("psutil") + +@pytest.fixture(scope='function') +def channel_stub(): + import queue + queue = queue.Queue() + + channel = grpc.insecure_channel("127.0.0.1:20408") + stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(channel) + def generate_startup(queue): + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.startup.version = "2.0.0" + msg.startup.name = "testclient" + messages = [ + msg + ] + for msg in messages: + yield msg + while True: + msg = queue.get() + yield msg + queue.task_done() + + stream = stub.ClientStream(generate_startup(queue)) + yield stub + channel.close() + +@pytest.fixture(scope='function') +def coordinator_place(channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + return channel_stub + +def test_startup(coordinator): + pass + +def test_coordinator_add_place(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + +def test_coordinator_del_place(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + place = labgrid_coordinator_pb2.DeletePlaceRequest(name=name) + res = channel_stub.DeletePlace(place) + assert res, f"There was an error: {res}" + +def test_coordinator_get_places(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + 
res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + name = "test2" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + + request = labgrid_coordinator_pb2.GetPlacesRequest() + res = channel_stub.GetPlaces(request) + + from labgrid.remote.common import Place + places = set() + names = set() + for pb2 in res.places: + place = Place.from_pb2(pb2) + places.add(place) + names.add(place.name) + + assert len(places) == 2, f"Returned places not two: {places}" + assert set(names) == {"test", "test2"}, f"There was an error: {res}" + +@pytest.mark.timeout(5) +def test_coordinator_exporter_session(coordinator, channel_stub): + import queue + queue = queue.Queue() + + def generate_startup(queue): + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.startup.version = "2.0.0" + msg.startup.name = "testporter" + messages = [ + msg + ] + for msg in messages: + yield msg + while True: + msg = queue.get() + yield msg + queue.task_done() + + coordinator = channel_stub.ExporterStream(generate_startup(queue), wait_for_ready=True) + +def test_coordinator_place_acquire(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + +def test_coordinator_place_acquire_release(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + res = stub.ReleasePlace(labgrid_coordinator_pb2.ReleasePlaceRequest(placename="test")) + assert res + +def test_coordinator_place_add_alias(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceAlias(labgrid_coordinator_pb2.AddPlaceAliasRequest(placename="test", alias="testalias")) + assert res + +def test_coordinator_place_add_remove_alias(coordinator, coordinator_place): + stub = coordinator_place + res = 
stub.AddPlaceAlias(labgrid_coordinator_pb2.AddPlaceAliasRequest(placename="test", alias="testalias")) + assert res + res = stub.DeletePlaceAlias(labgrid_coordinator_pb2.DeletePlaceAliasRequest(placename="test", alias="testalias")) + assert res + +def test_coordinator_place_set_tags(coordinator, coordinator_place): + stub = coordinator_place + res = stub.SetPlaceTags(labgrid_coordinator_pb2.SetPlaceTagsRequest(placename="test", tags={"one": "two"})) + assert res + +def test_coordinator_place_set_comment(coordinator, coordinator_place): + stub = coordinator_place + res = stub.SetPlaceComment(labgrid_coordinator_pb2.SetPlaceCommentRequest(placename="test", comment="testcomment")) + assert res + +def test_coordinator_place_add_match(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceMatch(labgrid_coordinator_pb2.AddPlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + +def test_coordinator_place_add_delete_match(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceMatch(labgrid_coordinator_pb2.AddPlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + res = stub.DeletePlaceMatch(labgrid_coordinator_pb2.DeletePlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + +def test_coordinator_place_allow(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + res = stub.AllowPlace(labgrid_coordinator_pb2.AllowPlaceRequest(placename="test", user="othertest")) + assert res + +def test_coordinator_create_reservation(coordinator, coordinator_place): + tags = {"board": "test"} + stub = coordinator_place + res = stub.SetPlaceTags(labgrid_coordinator_pb2.SetPlaceTagsRequest(placename="test", tags=tags)) + spec = "board=test" + assert res + res = stub.CreateReservation(labgrid_coordinator_pb2.CreateReservationRequest(spec=spec, prio=1.0)) + 
assert res diff --git a/tests/test_pb2.py b/tests/test_pb2.py new file mode 100644 index 000000000..474ec5bd7 --- /dev/null +++ b/tests/test_pb2.py @@ -0,0 +1,152 @@ +from labgrid.remote.common import Place, ResourceMatch, Reservation, set_map_from_dict, build_dict_from_map +import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 + +def test_place_as_pb2(): + place = Place(name="testing-place") + pb2 = place.as_pb2() + assert pb2.name == "testing-place" + assert pb2.created == place.created + assert pb2.changed == place.changed + +def test_place_from_pb2(): + place_start = Place(name="testing-place", comment="such-comment") + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.name == place_start.name + assert place_new.comment == place_start.comment + assert place_new.tags == place_start.tags + assert place_new.matches == place_start.matches + assert place_new.acquired == place_start.acquired + assert place_new.acquired_resources == place_start.acquired_resources + assert place_new.allowed == place_start.allowed + assert place_new.created == place_start.created + assert place_new.changed == place_start.changed + assert place_new.reservation == place_start.reservation + +def test_from_pb2_tags(): + tags = {"some": "test", "more": "values"} + place_start = Place(name="testing-place", tags=tags) + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place", f"PB2 has wrong name: {pb2}" + assert pb2.tags != None, f"PB2 has no tags field: {pb2}" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.tags == place_start.tags + assert place_new.tags == tags + +def test_from_pb2_matches(): + rm = ResourceMatch("such","test","match") + place_start = Place(name="testing-place", matches=[rm]) + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place", f"PB2 has wrong name: {pb2}" + assert 
pb2.tags != None, f"PB2 has no tags field: {pb2}" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.tags == place_start.tags + assert place_new.matches == place_start.matches + +def test_from_pb2_tags_deepcopy(): + # Used by the RemotePlaceManager + tags = {"some": "test", "more": "values"} + place_start = Place(name="testing-place", tags=tags) + pb2 = place_start.as_pb2() + place_new = Place.from_pb2(pb2) + import copy + tags_copy = copy.deepcopy(place_new.tags) + +def test_place_as_pb2_copy_with_match(): + tags = {"some": "test", "more": "values"} + # Used by the RemotePlaceManager + place_start = Place(name="testing-place", tags=tags, comment="Hello", aliases={"some": "alias"}, matches = [ResourceMatch("testporter","somegroup","someclass")] ) + out = labgrid_coordinator_pb2.ClientOutMessage() + out.update.place.CopyFrom(place_start.as_pb2()) + +def test_match_as_from_pb2(): + rms = ResourceMatch("*","somegroup","someclass") + pb2 = rms.as_pb2() + assert pb2 + rme = ResourceMatch.from_pb2(pb2) + assert rms == rme + +def test_reservation_as_pb2(): + reservation = Reservation("test", filters={"some": "filter"}, allocations={"some": "allocation"}) + pb2 = reservation.as_pb2() + assert pb2.owner == "test" + assert pb2.token == reservation.token + assert pb2.state == reservation.state.value + assert pb2.filters == reservation.filters + assert pb2.created == reservation.created + assert pb2.timeout == reservation.timeout + +def test_reservation_as_from_pb2(): + resold = Reservation("test", filters={"some": "filter"}, allocations={"some": "allocation"}) + pb2 = resold.as_pb2() + assert pb2.owner == resold.owner + assert pb2.token == resold.token + assert pb2.state == resold.state.value + assert pb2.filters == resold.filters + assert pb2.created == resold.created + assert pb2.timeout == resold.timeout + + resnew = Reservation.from_pb2(pb2) + + assert resnew.owner == resold.owner + assert resnew.token == resold.token + assert 
resnew.state == resold.state + assert resnew.filters == resold.filters + assert resnew.created == resold.created + assert resnew.timeout == resold.timeout + +def test_resource_dict(): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + resource = labgrid_coordinator_pb2.Resource() + resource.ParseFromString(bm) + decoded = build_dict_from_map(resource.params) + + assert params == decoded + +def test_map_serialize(benchmark): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + def run(): + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + benchmark(run) + +def test_map_deser(benchmark): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + def run(): + resource = labgrid_coordinator_pb2.Resource() + resource.ParseFromString(bm) + decoded = build_dict_from_map(resource.params) + + benchmark(run) diff --git a/tests/test_remote.py b/tests/test_remote.py index 4f803b043..c88eab473 100644 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -1,8 +1,6 @@ import pytest import pexpect -pytest.importorskip('autobahn') - def test_client_help(): with pexpect.spawn('python -m labgrid.remote.client --help') as spawn: spawn.expect('usage')