From e52da4609e88e5b558582a6761efc32da205a452 Mon Sep 17 00:00:00 2001
From: Hendrik Makait
Date: Fri, 27 Sep 2024 13:00:43 +0200
Subject: [PATCH] Don't stop Adaptive on error (#8871)

Instead of stopping adaptive scaling permanently on the first error,
log the error and keep adapting. The periodic callback and its
start/stop lifecycle move from AdaptiveCore (now an ABC) into Adaptive,
and the adaptive loop now stops only when the cluster is no longer
running, when stop() is called explicitly, or when the Adaptive
instance is deleted.

---
 distributed/deploy/adaptive.py            | 115 ++++++-
 distributed/deploy/adaptive_core.py       | 116 ++-----
 distributed/deploy/tests/test_adaptive.py | 293 +++++++++++++++---
 .../deploy/tests/test_adaptive_core.py    | 136 ++------
 4 files changed, 387 insertions(+), 273 deletions(-)

diff --git a/distributed/deploy/adaptive.py b/distributed/deploy/adaptive.py
index 1638659db4..dd4e411f5f 100644
--- a/distributed/deploy/adaptive.py
+++ b/distributed/deploy/adaptive.py
@@ -1,20 +1,39 @@
 from __future__ import annotations
 
 import logging
+from collections.abc import Hashable
+from datetime import timedelta
 from inspect import isawaitable
+from typing import TYPE_CHECKING, Any, Callable, Literal, cast
 
 from tornado.ioloop import IOLoop
 
 import dask.config
 from dask.utils import parse_timedelta
 
+from distributed.compatibility import PeriodicCallback
+from distributed.core import Status
 from distributed.deploy.adaptive_core import AdaptiveCore
 from distributed.protocol import pickle
 from distributed.utils import log_errors
 
+if TYPE_CHECKING:
+    from typing_extensions import TypeAlias
+
+    from distributed.deploy.cluster import Cluster
+    from distributed.scheduler import WorkerState
+
 logger = logging.getLogger(__name__)
 
+AdaptiveStateState: TypeAlias = Literal[
+    "starting",
+    "running",
+    "stopped",
+    "inactive",
+]
+
 
 class Adaptive(AdaptiveCore):
     '''
     Adaptively allocate workers based on scheduler load.  A superclass.
@@ -81,16 +100,21 @@ class Adaptive(AdaptiveCore):
     specified in the dask config under the distributed.adaptive key.
     '''
 
+    interval: float | None
+    periodic_callback: PeriodicCallback | None
+    #: Whether this adaptive strategy is periodically adapting
+    state: AdaptiveStateState
+
     def __init__(
         self,
-        cluster=None,
-        interval=None,
-        minimum=None,
-        maximum=None,
-        wait_count=None,
-        target_duration=None,
-        worker_key=None,
-        **kwargs,
+        cluster: Cluster,
+        interval: str | float | timedelta | None = None,
+        minimum: int | None = None,
+        maximum: int | float | None = None,
+        wait_count: int | None = None,
+        target_duration: str | float | timedelta | None = None,
+        worker_key: Callable[[WorkerState], Hashable] | None = None,
+        **kwargs: Any,
     ):
         self.cluster = cluster
         self.worker_key = worker_key
@@ -99,20 +123,78 @@ def __init__(
         if interval is None:
             interval = dask.config.get("distributed.adaptive.interval")
         if minimum is None:
-            minimum = dask.config.get("distributed.adaptive.minimum")
+            minimum = cast(int, dask.config.get("distributed.adaptive.minimum"))
         if maximum is None:
-            maximum = dask.config.get("distributed.adaptive.maximum")
+            maximum = cast(float, dask.config.get("distributed.adaptive.maximum"))
         if wait_count is None:
-            wait_count = dask.config.get("distributed.adaptive.wait-count")
+            wait_count = cast(int, dask.config.get("distributed.adaptive.wait-count"))
         if target_duration is None:
-            target_duration = dask.config.get("distributed.adaptive.target-duration")
+            target_duration = cast(
+                str, dask.config.get("distributed.adaptive.target-duration")
+            )
+
+        self.interval = parse_timedelta(interval, "seconds")
+        self.periodic_callback = None
+
+        if self.interval and self.cluster:
+            import weakref
+
+            self_ref = weakref.ref(self)
+
+            async def _adapt():
+                adaptive = self_ref()
+                if not adaptive or adaptive.state != "running":
+                    return
+                if adaptive.cluster.status != Status.running:
+                    adaptive.stop(reason="cluster-not-running")
+                    return
+                try:
+                    await adaptive.adapt()
+                except Exception:
+                    logger.warning(
+                        "Adaptive encountered an error while adapting", exc_info=True
+                    )
+
+            self.periodic_callback = PeriodicCallback(_adapt, self.interval * 1000)
+            self.state = "starting"
+            self.loop.add_callback(self._start)
+        else:
+            self.state = "inactive"
 
         self.target_duration = parse_timedelta(target_duration)
 
-        super().__init__(
-            minimum=minimum, maximum=maximum, wait_count=wait_count, interval=interval
+        super().__init__(minimum=minimum, maximum=maximum, wait_count=wait_count)
+
+    def _start(self) -> None:
+        if self.state != "starting":
+            return
+
+        assert self.periodic_callback is not None
+        self.periodic_callback.start()
+        self.state = "running"
+        logger.info(
+            "Adaptive scaling started: minimum=%s maximum=%s",
+            self.minimum,
+            self.maximum,
         )
 
+    def stop(self, reason: str = "unknown") -> None:
+        if self.state in ("inactive", "stopped"):
+            return
+
+        if self.state == "running":
+            assert self.periodic_callback is not None
+            self.periodic_callback.stop()
+            logger.info(
+                "Adaptive scaling stopped: minimum=%s maximum=%s. Reason: %s",
+                self.minimum,
+                self.maximum,
+                reason,
+            )
+
+        self.periodic_callback = None
+        self.state = "stopped"
+
     @property
     def scheduler(self):
         return self.cluster.scheduler_comm
@@ -210,6 +292,9 @@ async def scale_up(self, n):
     def loop(self) -> IOLoop:
         """Override Adaptive.loop"""
         if self.cluster:
-            return self.cluster.loop
+            return self.cluster.loop  # type: ignore[return-value]
         else:
             return IOLoop.current()
+
+    def __del__(self):
+        self.stop(reason="adaptive-deleted")
diff --git a/distributed/deploy/adaptive_core.py b/distributed/deploy/adaptive_core.py
index ccb81008cf..128353d9cf 100644
--- a/distributed/deploy/adaptive_core.py
+++ b/distributed/deploy/adaptive_core.py
@@ -2,37 +2,24 @@
 
 import logging
 import math
+from abc import ABC, abstractmethod
 from collections import defaultdict, deque
 from collections.abc import Iterable
-from datetime import timedelta
-from typing import TYPE_CHECKING, Literal, cast
+from typing import TYPE_CHECKING, cast
 
 import tlz as toolz
-from tornado.ioloop import IOLoop
 
 import dask.config
-from dask.utils import parse_timedelta
 
-from distributed.compatibility import PeriodicCallback
 from distributed.metrics import time
 
 if TYPE_CHECKING:
-    from typing_extensions import TypeAlias
-
     from distributed.scheduler import WorkerState
 
 logger = logging.getLogger(__name__)
 
-AdaptiveStateState: TypeAlias = Literal[
-    "starting",
-    "running",
-    "stopped",
-    "inactive",
-]
-
-
-class AdaptiveCore:
+class AdaptiveCore(ABC):
     """
     The core logic for adaptive deployments, with none of the cluster details
 
@@ -91,54 +78,22 @@ class AdaptiveCore:
     minimum: int
     maximum: int | float
     wait_count: int
-    interval: int | float
-    periodic_callback: PeriodicCallback | None
-    plan: set[WorkerState]
-    requested: set[WorkerState]
-    observed: set[WorkerState]
     close_counts: defaultdict[WorkerState, int]
-    _adapting: bool
-    #: Whether this adaptive strategy is periodically adapting
-    _state: AdaptiveStateState
     log: deque[tuple[float, dict]]
+    _adapting: bool
 
     def __init__(
         self,
         minimum: int = 0,
         maximum: int | float = math.inf,
         wait_count: int = 3,
-        interval: str | int | float | timedelta = "1s",
     ):
         if not isinstance(maximum, int) and not math.isinf(maximum):
-            raise TypeError(f"maximum must be int or inf; got {maximum}")
+            raise ValueError(f"maximum must be int or inf; got {maximum}")
 
         self.minimum = minimum
         self.maximum = maximum
         self.wait_count = wait_count
-        self.interval = parse_timedelta(interval, "seconds")
-        self.periodic_callback = None
-
-        if self.interval:
-            import weakref
-
-            self_ref = weakref.ref(self)
-
-            async def _adapt():
-                core = self_ref()
-                if core:
-                    await core.adapt()
-
-            self.periodic_callback = PeriodicCallback(_adapt, self.interval * 1000)
-            self._state = "starting"
-            self.loop.add_callback(self._start)
-        else:
-            self._state = "inactive"
-        try:
-            self.plan = set()
-            self.requested = set()
-            self.observed = set()
-        except Exception:
-            pass
 
         # internal state
         self.close_counts = defaultdict(int)
@@ -147,38 +102,22 @@ async def _adapt():
             maxlen=dask.config.get("distributed.admin.low-level-log-length")
         )
 
-    def _start(self) -> None:
-        if self._state != "starting":
-            return
-
-        assert self.periodic_callback is not None
-        self.periodic_callback.start()
-        self._state = "running"
-        logger.info(
-            "Adaptive scaling started: minimum=%s maximum=%s",
-            self.minimum,
-            self.maximum,
-        )
-
-    def stop(self) -> None:
-        if self._state in ("inactive", "stopped"):
-            return
+    @property
+    @abstractmethod
+    def plan(self) -> set[WorkerState]: ...
 
-        if self._state == "running":
-            assert self.periodic_callback is not None
-            self.periodic_callback.stop()
-            logger.info(
-                "Adaptive scaling stopped: minimum=%s maximum=%s",
-                self.minimum,
-                self.maximum,
-            )
+    @property
+    @abstractmethod
+    def requested(self) -> set[WorkerState]: ...
 
-        self.periodic_callback = None
-        self._state = "stopped"
+    @property
+    @abstractmethod
+    def observed(self) -> set[WorkerState]: ...
 
+    @abstractmethod
     async def target(self) -> int:
         """The target number of workers that should exist"""
-        raise NotImplementedError()
+        ...
 
     async def workers_to_close(self, target: int) -> list:
         """
@@ -198,11 +137,11 @@ async def safe_target(self) -> int:
 
         return n
 
-    async def scale_down(self, n: int) -> None:
-        raise NotImplementedError()
+    @abstractmethod
+    async def scale_down(self, n: int) -> None: ...
 
-    async def scale_up(self, workers: Iterable) -> None:
-        raise NotImplementedError()
+    @abstractmethod
+    async def scale_up(self, workers: Iterable) -> None: ...
 
     async def recommendations(self, target: int) -> dict:
         """
@@ -270,20 +209,5 @@ async def adapt(self) -> None:
                 await self.scale_up(**recommendations)
             if status == "down":
                 await self.scale_down(**recommendations)
-        except OSError:
-            if status != "down":
-                logger.error("Adaptive stopping due to error", exc_info=True)
-                self.stop()
-            else:
-                logger.error(
-                    "Error during adaptive downscaling. Ignoring.", exc_info=True
Ignoring.", exc_info=True - ) finally: self._adapting = False - - def __del__(self): - self.stop() - - @property - def loop(self) -> IOLoop: - return IOLoop.current() diff --git a/distributed/deploy/tests/test_adaptive.py b/distributed/deploy/tests/test_adaptive.py index a71fdfb298..441c1c609d 100644 --- a/distributed/deploy/tests/test_adaptive.py +++ b/distributed/deploy/tests/test_adaptive.py @@ -1,6 +1,7 @@ from __future__ import annotations import asyncio +import logging import math from time import sleep @@ -17,8 +18,16 @@ Worker, wait, ) +from distributed.core import Status +from distributed.deploy.cluster import Cluster from distributed.metrics import time -from distributed.utils_test import async_poll_for, gen_cluster, gen_test, slowinc +from distributed.utils_test import ( + async_poll_for, + captured_logger, + gen_cluster, + gen_test, + slowinc, +) def test_adaptive_local_cluster(loop): @@ -80,39 +89,6 @@ async def test_adaptive_local_cluster_multi_workers(): await c.gather(futures) -@pytest.mark.xfail(reason="changed API") -@gen_test() -async def test_adaptive_scale_down_override(): - class TestAdaptive(Adaptive): - def __init__(self, *args, **kwargs): - self.min_size = kwargs.pop("min_size", 0) - super().__init__(*args, **kwargs) - - async def workers_to_close(self, **kwargs): - num_workers = len(self.cluster.workers) - to_close = await self.scheduler.workers_to_close(**kwargs) - if num_workers - len(to_close) < self.min_size: - to_close = to_close[: num_workers - self.min_size] - - return to_close - - class TestCluster(LocalCluster): - def scale_up(self, n, **kwargs): - assert False - - async with TestCluster( - n_workers=10, processes=False, asynchronous=True, dashboard_address=":0" - ) as cluster: - ta = cluster.adapt( - min_size=2, interval=0.1, scale_factor=2, Adaptive=TestAdaptive - ) - await asyncio.sleep(0.3) - - # Assert that adaptive cycle does not reduce cluster below minimum size - # as determined via override. 
-        assert len(cluster.scheduler.workers) == 2
-
-
 @gen_test()
 async def test_min_max():
     async with LocalCluster(
@@ -400,17 +376,23 @@ async def test_adapt_cores_memory():
 
 @gen_test()
 async def test_adaptive_config():
-    with dask.config.set(
-        {"distributed.adaptive.minimum": 10, "distributed.adaptive.wait-count": 8}
-    ):
-        try:
-            adapt = Adaptive(interval="5s")
-            assert adapt.minimum == 10
-            assert adapt.maximum == math.inf
-            assert adapt.interval == 5
-            assert adapt.wait_count == 8
-        finally:
-            adapt.stop()
+    async with LocalCluster(
+        n_workers=0,
+        asynchronous=True,
+        silence_logs=False,
+        dashboard_address=":0",
+    ) as cluster:
+        with dask.config.set(
+            {"distributed.adaptive.minimum": 10, "distributed.adaptive.wait-count": 8}
+        ):
+            try:
+                adapt = Adaptive(cluster, interval="5s")
+                assert adapt.minimum == 10
+                assert adapt.maximum == math.inf
+                assert adapt.interval == 5
+                assert adapt.wait_count == 8
+            finally:
+                adapt.stop()
 
 
 @gen_test()
@@ -427,6 +409,8 @@ async def test_update_adaptive():
         first = cluster.adapt(maximum=1)
         second = cluster.adapt(maximum=2)
         await asyncio.sleep(0.2)
+        assert first.state == "stopped"
+        assert second.state == "running"
         assert first.periodic_callback is None
         assert second.periodic_callback.is_running()
 
@@ -454,6 +438,19 @@ async def test_adaptive_no_memory_limit():
     )
 
 
+@gen_test()
+async def test_adapt_gets_stopped_on_cluster_close():
+    class MyCluster(Cluster):
+        pass
+
+    async with MyCluster(asynchronous=True) as cluster:
+        adapt = cluster.adapt(minimum=1, maximum=10, interval="10ms")
+        while adapt.state != "running":
+            await asyncio.sleep(0.01)
+        await cluster.close()
+        assert adapt.state == "stopped"
+
+
 @gen_test()
 async def test_scale_needs_to_be_awaited():
     """
@@ -495,13 +492,12 @@ async def test_adaptive_stopped():
         n_workers=0, asynchronous=True, dashboard_address=":0"
     ) as cluster:
         instance = cluster.adapt(interval="10ms")
+        await async_poll_for(lambda: instance.state == "running", timeout=5)
         assert instance.periodic_callback is not None
-
-        await async_poll_for(lambda: instance.periodic_callback.is_running(), timeout=5)
-
+        assert instance.periodic_callback.is_running()
         pc = instance.periodic_callback
-
-    await async_poll_for(lambda: not pc.is_running(), timeout=5)
+    await async_poll_for(lambda: instance.state == "stopped", timeout=5)
+    assert not pc.is_running()
 
 
 @pytest.mark.parametrize("saturation", [1, float("inf")])
@gen_cluster(
@@ -544,3 +540,200 @@ async def test_respect_average_nthreads(c, s, w):
         await asyncio.sleep(0.001)
 
     assert s.adaptive_target() == 40
+
+
+class MyAdaptive(Adaptive):
+    def __init__(self, *args, interval=None, **kwargs):
+        super().__init__(*args, interval=interval, **kwargs)
+        self._target = 0
+        self._log = []
+        self._observed = set()
+        self._plan = set()
+        self._requested = set()
+
+    @property
+    def observed(self):
+        return self._observed
+
+    @property
+    def plan(self):
+        return self._plan
+
+    @property
+    def requested(self):
+        return self._requested
+
+    async def target(self):
+        return self._target
+
+    async def scale_up(self, n=0):
+        self._plan = self._requested = set(range(n))
+
+    async def scale_down(self, workers=()):
+        for collection in [self.plan, self.requested, self.observed]:
+            for w in workers:
+                collection.discard(w)
+
+
+@gen_test()
+async def test_adaptive_stops_on_cluster_status_change():
+    async with LocalCluster(
+        n_workers=0,
+        asynchronous=True,
+        silence_logs=False,
+        dashboard_address=":0",
+    ) as cluster:
+        adapt = Adaptive(cluster, interval="100 ms")
+        assert adapt.state == "starting"
+        await async_poll_for(lambda: adapt.state == "running", timeout=5)
+
+        assert adapt.periodic_callback
+        assert adapt.periodic_callback.is_running()
+
+        try:
+            cluster.status = Status.closing
+
+            await async_poll_for(lambda: adapt.state != "running", timeout=5)
+            assert adapt.state == "stopped"
+            assert not adapt.periodic_callback
+        finally:
+            # Set back to running to let normal shutdown do its thing
+            cluster.status = Status.running
+
+
+@gen_test()
+async def test_interval():
+    async with LocalCluster(
+        n_workers=0,
+        asynchronous=True,
+        silence_logs=False,
+        dashboard_address=":0",
+    ) as cluster:
+        adapt = MyAdaptive(cluster=cluster, interval="100 ms")
+        assert not adapt.plan
+
+        for i in [0, 3, 1]:
+            start = time()
+            adapt._target = i
+            while len(adapt.plan) != i:
+                await asyncio.sleep(0.01)
+            assert time() < start + 2
+
+        adapt.stop()
+        await asyncio.sleep(0.05)
+
+        adapt._target = 10
+        await asyncio.sleep(0.02)
+        assert len(adapt.plan) == 1  # last value from before, unchanged
+
+
+@gen_test()
+async def test_adapt_logs_error_in_safe_target():
+    class BadAdaptive(MyAdaptive):
+        """Adaptive subclass which raises an OSError when attempting to adapt
+
+        We use this to check that error handling works properly
+        """
+
+        def safe_target(self):
+            raise OSError()
+
+    async with LocalCluster(
+        n_workers=0,
+        asynchronous=True,
+        silence_logs=False,
+        dashboard_address=":0",
+    ) as cluster:
+        with captured_logger(
+            "distributed.deploy.adaptive", level=logging.WARNING
+        ) as log:
+            adapt = cluster.adapt(
+                Adaptive=BadAdaptive, minimum=1, maximum=4, interval="10ms"
+            )
+            while "encountered an error" not in log.getvalue():
+                await asyncio.sleep(0.01)
+        assert "stop" not in log.getvalue()
+        assert adapt.state == "running"
+        assert adapt.periodic_callback
+        assert adapt.periodic_callback.is_running()
+
+
+@gen_test()
+async def test_adapt_callback_logs_error_in_scale_down():
+    class BadAdaptive(MyAdaptive):
+        async def scale_down(self, workers=None):
+            raise OSError()
+
+    async with LocalCluster(
+        n_workers=0,
+        asynchronous=True,
+        silence_logs=False,
+        dashboard_address=":0",
+    ) as cluster:
+        adapt = cluster.adapt(
+            Adaptive=BadAdaptive, minimum=1, maximum=4, wait_count=0, interval="10ms"
+        )
+        adapt._target = 2
+        await async_poll_for(lambda: adapt.state == "running", timeout=5)
+        assert adapt.periodic_callback.is_running()
+        await adapt.adapt()
+        assert len(adapt.plan) == 2
+        assert len(adapt.requested) == 2
+        with captured_logger(
+            "distributed.deploy.adaptive", level=logging.WARNING
+        ) as log:
+            adapt._target = 0
+            while "encountered an error" not in log.getvalue():
+                await asyncio.sleep(0.01)
+        assert "stop" not in log.getvalue()
+        assert not adapt._adapting
+        assert adapt.periodic_callback
+        assert adapt.periodic_callback.is_running()
+
+
+@pytest.mark.parametrize("wait_until_running", [True, False])
+@gen_test()
+async def test_adaptive_logs_stopping_once(wait_until_running):
+    async with LocalCluster(
+        n_workers=0,
+        asynchronous=True,
+        silence_logs=False,
+        dashboard_address=":0",
+    ) as cluster:
+        with captured_logger("distributed.deploy.adaptive") as log:
+            adapt = cluster.adapt(Adaptive=MyAdaptive, interval="100ms")
+            if wait_until_running:
+                await async_poll_for(lambda: adapt.state == "running", timeout=5)
+                assert adapt.periodic_callback
+                assert adapt.periodic_callback.is_running()
+                pc = adapt.periodic_callback
+            else:
+                assert adapt.periodic_callback
+                assert not adapt.periodic_callback.is_running()
+                pc = adapt.periodic_callback
+
+            adapt.stop()
+            adapt.stop()
adapt.state == "stopped" + assert not adapt.periodic_callback + assert not pc.is_running() + lines = log.getvalue().splitlines() + assert sum("Adaptive scaling stopped" in line for line in lines) == 1 + + +@gen_test() +async def test_adapt_stop_del(): + async with LocalCluster( + n_workers=0, + asynchronous=True, + silence_logs=False, + dashboard_address=":0", + ) as cluster: + adapt = cluster.adapt(Adaptive=MyAdaptive, interval="100ms") + pc = adapt.periodic_callback + await async_poll_for(lambda: adapt.state == "running", timeout=5) # noqa: F821 + + # Remove reference of adaptive object from cluster + cluster._adaptive = None + del adapt + await async_poll_for(lambda: not pc.is_running(), timeout=5) diff --git a/distributed/deploy/tests/test_adaptive_core.py b/distributed/deploy/tests/test_adaptive_core.py index b5cfd734ab..cc2336e76a 100644 --- a/distributed/deploy/tests/test_adaptive_core.py +++ b/distributed/deploy/tests/test_adaptive_core.py @@ -1,23 +1,35 @@ from __future__ import annotations -import asyncio - from distributed.deploy.adaptive_core import AdaptiveCore -from distributed.metrics import time -from distributed.utils_test import captured_logger, gen_test +from distributed.utils_test import gen_test -class MyAdaptive(AdaptiveCore): - def __init__(self, *args, interval=None, **kwargs): - super().__init__(*args, interval=interval, **kwargs) +class MyAdaptiveCore(AdaptiveCore): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._observed = set() + self._plan = set() + self._requested = set() self._target = 0 self._log = [] + @property + def observed(self): + return self._observed + + @property + def plan(self): + return self._plan + + @property + def requested(self): + return self._requested + async def target(self): return self._target async def scale_up(self, n=0): - self.plan = self.requested = set(range(n)) + self._plan = self._requested = set(range(n)) async def scale_down(self, workers=()): for collection in [self.plan, self.requested, self.observed]: @@ -27,7 +39,7 @@ async def scale_down(self, workers=()): @gen_test() async def test_safe_target(): - adapt = MyAdaptive(minimum=1, maximum=4) + adapt = MyAdaptiveCore(minimum=1, maximum=4) assert await adapt.safe_target() == 1 adapt._target = 10 assert await adapt.safe_target() == 4 @@ -35,7 +47,7 @@ async def test_safe_target(): @gen_test() async def test_scale_up(): - adapt = MyAdaptive(minimum=1, maximum=4) + adapt = MyAdaptiveCore(minimum=1, maximum=4) await adapt.adapt() assert adapt.log[-1][1] == {"status": "up", "n": 1} assert adapt.plan == {0} @@ -48,12 +60,12 @@ async def test_scale_up(): @gen_test() async def test_scale_down(): - adapt = MyAdaptive(minimum=1, maximum=4, wait_count=2) + adapt = MyAdaptiveCore(minimum=1, maximum=4, wait_count=2) adapt._target = 10 await adapt.adapt() assert len(adapt.log) == 1 - adapt.observed = {0, 1, 3} # all but 2 have arrived + adapt._observed = {0, 1, 3} # all but 2 have arrived adapt._target = 2 await adapt.adapt() @@ -70,103 +82,3 @@ async def test_scale_down(): await adapt.adapt() await adapt.adapt() assert list(adapt.log) == old - - -@gen_test() -async def test_interval(): - adapt = MyAdaptive(interval="5 ms") - assert not adapt.plan - - for i in [0, 3, 1]: - start = time() - adapt._target = i - while len(adapt.plan) != i: - await asyncio.sleep(0.001) - assert time() < start + 2 - - adapt.stop() - await asyncio.sleep(0.05) - - adapt._target = 10 - await asyncio.sleep(0.02) - assert len(adapt.plan) == 1 # last value from before, unchanged - - 
-@gen_test()
-async def test_interval():
-    adapt = MyAdaptive(interval="5 ms")
-    assert not adapt.plan
-
-    for i in [0, 3, 1]:
-        start = time()
-        adapt._target = i
-        while len(adapt.plan) != i:
-            await asyncio.sleep(0.001)
-        assert time() < start + 2
-
-    adapt.stop()
-    await asyncio.sleep(0.05)
-
-    adapt._target = 10
-    await asyncio.sleep(0.02)
-    assert len(adapt.plan) == 1  # last value from before, unchanged
-
-
-@gen_test()
-async def test_adapt_oserror_safe_target():
-    class BadAdaptive(MyAdaptive):
-        """AdaptiveCore subclass which raises an OSError when attempting to adapt
-
-        We use this to check that error handling works properly
-        """
-
-        def safe_target(self):
-            raise OSError()
-
-    with captured_logger("distributed.deploy.adaptive_core") as log:
-        adapt = BadAdaptive(minimum=1, maximum=4, interval="10ms")
-        while adapt._state != "stopped":
-            await asyncio.sleep(0.01)
-    text = log.getvalue()
-    assert "Adaptive stopping due to error" in text
-    assert "Adaptive scaling stopped" in text
-    assert not adapt._adapting
-    assert not adapt.periodic_callback
-
-
-@gen_test()
-async def test_adapt_oserror_scale():
-    """
-    FIXME:
-    If we encounter an OSError during scale down, we continue as before. It is
-    not entirely clear if this is the correct behaviour but defines the current
-    state.
-    This was probably introduced to protect against comm failures during
-    shutdown but the scale down command should be robust call to the scheduler
-    which is never scaled down.
-    """
-
-    class BadAdaptive(MyAdaptive):
-        async def scale_down(self, workers=None):
-            raise OSError()
-
-    adapt = BadAdaptive(minimum=1, maximum=4, wait_count=0, interval="10ms")
-    adapt._target = 2
-    while not adapt.periodic_callback.is_running():
-        await asyncio.sleep(0.01)
-    await adapt.adapt()
-    assert len(adapt.plan) == 2
-    assert len(adapt.requested) == 2
-    with captured_logger("distributed.deploy.adaptive_core") as log:
-        adapt._target = 0
-        await adapt.adapt()
-    text = log.getvalue()
-    assert "Error during adaptive downscaling" in text
-    assert not adapt._adapting
-    assert adapt.periodic_callback
-    assert adapt.periodic_callback.is_running()
-    adapt.stop()
-
-
-@gen_test()
-async def test_adaptive_logs_stopping_once():
-    with captured_logger("distributed.deploy.adaptive_core") as log:
-        adapt = MyAdaptive(interval="100ms")
-        while not adapt.periodic_callback.is_running():
-            await asyncio.sleep(0.01)
-        adapt.stop()
-        adapt.stop()
-    lines = log.getvalue().splitlines()
-    assert sum("Adaptive scaling stopped" in line for line in lines) == 1
-
-
-@gen_test()
-async def test_adapt_stop_del():
-    adapt = MyAdaptive(interval="100ms")
-    pc = adapt.periodic_callback
-    while not adapt.periodic_callback.is_running():
-        await asyncio.sleep(0.01)
-
-    del adapt
-    while pc.is_running():
-        await asyncio.sleep(0.01)
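-- 

Usage sketch (illustrative only; not part of the patch, and placed below
the terminator so git tooling ignores it). With this change, an exception
raised inside the periodic adapt cycle is logged as a warning ("Adaptive
encountered an error while adapting") and adaptation keeps running;
adaptive scaling stops only when the cluster leaves the running state,
stop() is called, or the Adaptive object is deleted. Assuming a local
machine and a distributed release containing this patch:

    import asyncio

    from distributed import LocalCluster

    async def main() -> None:
        async with LocalCluster(n_workers=0, asynchronous=True) as cluster:
            # cluster.adapt() returns the Adaptive instance; its periodic
            # callback is started on the cluster's event loop shortly after.
            adaptive = cluster.adapt(minimum=0, maximum=2, interval="100ms")
            await asyncio.sleep(1)
            # A transient error in adapt() no longer flips this state to
            # "stopped"; it is logged and the loop keeps polling.
            assert adaptive.state == "running"
        # Closing the cluster stops adaptivity ("cluster-not-running").

    asyncio.run(main())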