From 4866a9a99feecc35104f9e8898b0c842389aa506 Mon Sep 17 00:00:00 2001
From: Michal Moskal
Date: Thu, 14 Mar 2024 21:46:55 +0000
Subject: [PATCH 01/38] removing python from gctrl

---
 controllers/guidance_ctrl/Cargo.toml           |   11 -
 controllers/guidance_ctrl/Lib/LICENSE          |   21 -
 .../guidance_ctrl/Lib/_collections_abc.py      | 1121 -------
 controllers/guidance_ctrl/Lib/_py_abc.py       |  147 -
 controllers/guidance_ctrl/Lib/_weakrefset.py   |  206 --
 controllers/guidance_ctrl/Lib/abc.py           |  192 --
 .../guidance_ctrl/Lib/collections/__init__.py  | 1578 ----------
 .../Lib/collections/_defaultdict.py            |   58 -
 .../guidance_ctrl/Lib/collections/abc.py       |    3 -
 controllers/guidance_ctrl/Lib/contextlib.py    |  779 -----
 controllers/guidance_ctrl/Lib/copy.sh          |   11 -
 controllers/guidance_ctrl/Lib/copyreg.py       |  209 --
 controllers/guidance_ctrl/Lib/enum.py          | 2081 -------------
 controllers/guidance_ctrl/Lib/functools.py     | 1006 -------
 controllers/guidance_ctrl/Lib/genericpath.py   |  158 -
 .../guidance_ctrl/Lib/guidance/LICENSE.md      |   21 -
 .../guidance_ctrl/Lib/guidance/__init__.py     |   89 -
 .../guidance_ctrl/Lib/guidance/_grammar.py     |  859 ------
 .../Lib/guidance/_json_schema_to_grammar.py    |  123 -
 .../guidance_ctrl/Lib/guidance/_parser.py      |  545 ----
 .../guidance_ctrl/Lib/guidance/_server.py      |   89 -
 .../guidance_ctrl/Lib/guidance/_utils.py       |  222 --
 .../Lib/guidance/library/__init__.py           |   34 -
 .../Lib/guidance/library/_any_char.py          |    7 -
 .../Lib/guidance/library/_any_char_but.py      |   21 -
 .../Lib/guidance/library/_block.py             |   16 -
 .../Lib/guidance/library/_char_range.py        |    8 -
 .../Lib/guidance/library/_char_set.py          |   17 -
 .../Lib/guidance/library/_format.py            |    5 -
 .../Lib/guidance/library/_gen.py               |  239 --
 .../Lib/guidance/library/_image.py             |   34 -
 .../Lib/guidance/library/_one_or_more.py       |    6 -
 .../Lib/guidance/library/_optional.py          |    6 -
 .../Lib/guidance/library/_prefix_tree.py       |   25 -
 .../Lib/guidance/library/_regex.py             |  116 -
 .../Lib/guidance/library/_role.py              |   95 -
 .../Lib/guidance/library/_set_attribute.py     |   21 -
 .../Lib/guidance/library/_set_var.py           |   21 -
 .../Lib/guidance/library/_silent.py            |    6 -
 .../Lib/guidance/library/_substring.py         |  144 -
 .../Lib/guidance/library/_tool.py              |   62 -
 .../Lib/guidance/library/_zero_or_more.py      |    6 -
 .../guidance_ctrl/Lib/guidance/selectors.py    |  116 -
 controllers/guidance_ctrl/Lib/keyword.py       |   63 -
 controllers/guidance_ctrl/Lib/operator.py      |  467 ---
 controllers/guidance_ctrl/Lib/os.py            | 1122 -------
 controllers/guidance_ctrl/Lib/posixpath.py     |  544 ----
 controllers/guidance_ctrl/Lib/re.py            |  384 ---
 controllers/guidance_ctrl/Lib/reprlib.py       |  161 -
 controllers/guidance_ctrl/Lib/sre_compile.py   |  784 -----
 .../guidance_ctrl/Lib/sre_constants.py         |  293 --
 controllers/guidance_ctrl/Lib/sre_parse.py     | 1064 -------
 controllers/guidance_ctrl/Lib/stat.py          |  195 --
 controllers/guidance_ctrl/Lib/types.py         |  308 --
 controllers/guidance_ctrl/Lib/typing.py        | 2675 -----------------
 controllers/guidance_ctrl/build.rs             |   13 -
 controllers/guidance_ctrl/samples/gtest.py     |   50 -
 controllers/guidance_ctrl/src/gctrl.rs         |  716 +----
 58 files changed, 67 insertions(+), 19306 deletions(-)
 delete mode 100644 controllers/guidance_ctrl/Lib/LICENSE
 delete mode 100644 controllers/guidance_ctrl/Lib/_collections_abc.py
 delete mode 100644 controllers/guidance_ctrl/Lib/_py_abc.py
 delete mode 100644 controllers/guidance_ctrl/Lib/_weakrefset.py
 delete mode 100644 controllers/guidance_ctrl/Lib/abc.py
 delete mode 100644 controllers/guidance_ctrl/Lib/collections/__init__.py
 delete mode 100644 controllers/guidance_ctrl/Lib/collections/_defaultdict.py
 delete mode 100644 controllers/guidance_ctrl/Lib/collections/abc.py
 delete mode 100644 controllers/guidance_ctrl/Lib/contextlib.py
 delete mode 100755 controllers/guidance_ctrl/Lib/copy.sh
 delete mode 100644 controllers/guidance_ctrl/Lib/copyreg.py
 delete mode 100644 controllers/guidance_ctrl/Lib/enum.py
 delete mode 100644 controllers/guidance_ctrl/Lib/functools.py
 delete mode 100644 controllers/guidance_ctrl/Lib/genericpath.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/LICENSE.md
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/__init__.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/_grammar.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/_json_schema_to_grammar.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/_parser.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/_server.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/_utils.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/__init__.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_any_char.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_any_char_but.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_block.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_char_range.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_char_set.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_format.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_gen.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_image.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_one_or_more.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_optional.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_prefix_tree.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_regex.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_role.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_set_attribute.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_set_var.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_silent.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_substring.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_tool.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/library/_zero_or_more.py
 delete mode 100644 controllers/guidance_ctrl/Lib/guidance/selectors.py
 delete mode 100644 controllers/guidance_ctrl/Lib/keyword.py
 delete mode 100644 controllers/guidance_ctrl/Lib/operator.py
 delete mode 100644 controllers/guidance_ctrl/Lib/os.py
 delete mode 100644 controllers/guidance_ctrl/Lib/posixpath.py
 delete mode 100644 controllers/guidance_ctrl/Lib/re.py
 delete mode 100644 controllers/guidance_ctrl/Lib/reprlib.py
 delete mode 100644 controllers/guidance_ctrl/Lib/sre_compile.py
 delete mode 100644 controllers/guidance_ctrl/Lib/sre_constants.py
 delete mode 100644 controllers/guidance_ctrl/Lib/sre_parse.py
 delete mode 100644 controllers/guidance_ctrl/Lib/stat.py
 delete mode 100644 controllers/guidance_ctrl/Lib/types.py
 delete mode 100644 controllers/guidance_ctrl/Lib/typing.py
 delete mode 100644 controllers/guidance_ctrl/build.rs
 delete mode 100644 controllers/guidance_ctrl/samples/gtest.py

diff --git a/controllers/guidance_ctrl/Cargo.toml b/controllers/guidance_ctrl/Cargo.toml
index d7a90fd6..6413d08f 100644
--- a/controllers/guidance_ctrl/Cargo.toml
+++ b/controllers/guidance_ctrl/Cargo.toml
@@ -8,18 +8,7 @@ aici_abi = { path = "../aici_abi" }
 serde = { version = "1.0.192", features = ["derive"] }
 serde_json = "1.0.108"
 anyhow = "1.0.75"
-rustpython-vm = { git = "https://github.com/RustPython/RustPython", rev = "317f44945420e", default-features = false, features = ["compiler"] }
-rustpython-derive = { git = "https://github.com/RustPython/RustPython", rev = "317f44945420e" }
-libc = "0.2.150"
-lazy_static = "1.4.0"
-num-traits = "0.2.17"
-crossbeam-utils = "0.8.16"
-once_cell = "1.18.0"
-indexmap = { version = "2.1.0", features = ["std"] }
 
 [[bin]]
 name = "aici_guidance_ctrl"
 path = "src/gctrl.rs"
-
-[build-dependencies]
-glob = "0.3.1"
diff --git a/controllers/guidance_ctrl/Lib/LICENSE b/controllers/guidance_ctrl/Lib/LICENSE
deleted file mode 100644
index 2c1db46f..00000000
--- a/controllers/guidance_ctrl/Lib/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020 RustPython Team
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/controllers/guidance_ctrl/Lib/_collections_abc.py b/controllers/guidance_ctrl/Lib/_collections_abc.py
deleted file mode 100644
index e96e4c35..00000000
--- a/controllers/guidance_ctrl/Lib/_collections_abc.py
+++ /dev/null
@@ -1,1121 +0,0 @@
-# Copyright 2007 Google, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
-
-Unit tests are in test_collections.
-"""
-
-from abc import ABCMeta, abstractmethod
-import sys
-
-GenericAlias = type(list[int])
-EllipsisType = type(...)
-def _f(): pass
-FunctionType = type(_f)
-del _f
-
-__all__ = ["Awaitable", "Coroutine",
-           "AsyncIterable", "AsyncIterator", "AsyncGenerator",
-           "Hashable", "Iterable", "Iterator", "Generator", "Reversible",
-           "Sized", "Container", "Callable", "Collection",
-           "Set", "MutableSet",
-           "Mapping", "MutableMapping",
-           "MappingView", "KeysView", "ItemsView", "ValuesView",
-           "Sequence", "MutableSequence",
-           "ByteString",
-           ]
-
-# This module has been renamed from collections.abc to _collections_abc to
-# speed up interpreter startup. Some of the types such as MutableMapping are
-# required early but collections module imports a lot of other modules.
-# See issue #19218
-__name__ = "collections.abc"
-
-# Private list of types that we want to register with the various ABCs
-# so that they will pass tests like:
-#       it = iter(somebytearray)
-#       assert isinstance(it, Iterable)
-# Note: in other implementations, these types might not be distinct
-# and they may have their own implementation specific types that
-# are not included on this list.
-bytes_iterator = type(iter(b''))
-bytearray_iterator = type(iter(bytearray()))
-#callable_iterator = ???
-dict_keyiterator = type(iter({}.keys()))
-dict_valueiterator = type(iter({}.values()))
-dict_itemiterator = type(iter({}.items()))
-list_iterator = type(iter([]))
-list_reverseiterator = type(iter(reversed([])))
-range_iterator = type(iter(range(0)))
-longrange_iterator = type(iter(range(1 << 1000)))
-set_iterator = type(iter(set()))
-str_iterator = type(iter(""))
-tuple_iterator = type(iter(()))
-zip_iterator = type(iter(zip()))
-## views ##
-dict_keys = type({}.keys())
-dict_values = type({}.values())
-dict_items = type({}.items())
-## misc ##
-mappingproxy = type(type.__dict__)
-generator = type((lambda: (yield))())
-## coroutine ##
-async def _coro(): pass
-_coro = _coro()
-coroutine = type(_coro)
-_coro.close()  # Prevent ResourceWarning
-del _coro
-## asynchronous generator ##
-async def _ag(): yield
-_ag = _ag()
-async_generator = type(_ag)
-del _ag
-
-
-### ONE-TRICK PONIES ###
-
-def _check_methods(C, *methods):
-    mro = C.__mro__
-    for method in methods:
-        for B in mro:
-            if method in B.__dict__:
-                if B.__dict__[method] is None:
-                    return NotImplemented
-                break
-        else:
-            return NotImplemented
-    return True
-
-class Hashable(metaclass=ABCMeta):
-
-    __slots__ = ()
-
-    @abstractmethod
-    def __hash__(self):
-        return 0
-
-    @classmethod
-    def __subclasshook__(cls, C):
-        if cls is Hashable:
-            return _check_methods(C, "__hash__")
-        return NotImplemented
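For context on what `_check_methods` buys: these one-trick-pony ABCs recognize implementers purely structurally, so a class never has to inherit from them, and setting the method to None opts back out. A minimal sketch of that behavior, run against the stdlib collections.abc rather than the vendored copy being deleted here:

    from collections.abc import Hashable

    class Point:
        # No ABC inheritance: defining __hash__ is enough for the
        # __subclasshook__/_check_methods machinery to accept the class.
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __hash__(self):
            return hash((self.x, self.y))

    class Unhashable:
        __hash__ = None  # the explicit None opt-out _check_methods looks for

    assert isinstance(Point(1, 2), Hashable)
    assert not isinstance(Unhashable(), Hashable)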
- """ - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError("coroutine ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is Coroutine: - return _check_methods(C, '__await__', 'send', 'throw', 'close') - return NotImplemented - - -Coroutine.register(coroutine) - - -class AsyncIterable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __aiter__(self): - return AsyncIterator() - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncIterable: - return _check_methods(C, "__aiter__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class AsyncIterator(AsyncIterable): - - __slots__ = () - - @abstractmethod - async def __anext__(self): - """Return the next item or raise StopAsyncIteration when exhausted.""" - raise StopAsyncIteration - - def __aiter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncIterator: - return _check_methods(C, "__anext__", "__aiter__") - return NotImplemented - - -class AsyncGenerator(AsyncIterator): - - __slots__ = () - - async def __anext__(self): - """Return the next item from the asynchronous generator. - When exhausted, raise StopAsyncIteration. - """ - return await self.asend(None) - - @abstractmethod - async def asend(self, value): - """Send a value into the asynchronous generator. - Return next yielded value or raise StopAsyncIteration. - """ - raise StopAsyncIteration - - @abstractmethod - async def athrow(self, typ, val=None, tb=None): - """Raise an exception in the asynchronous generator. - Return next yielded value or raise StopAsyncIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - async def aclose(self): - """Raise GeneratorExit inside coroutine. - """ - try: - await self.athrow(GeneratorExit) - except (GeneratorExit, StopAsyncIteration): - pass - else: - raise RuntimeError("asynchronous generator ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncGenerator: - return _check_methods(C, '__aiter__', '__anext__', - 'asend', 'athrow', 'aclose') - return NotImplemented - - -AsyncGenerator.register(async_generator) - - -class Iterable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __iter__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterable: - return _check_methods(C, "__iter__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Iterator(Iterable): - - __slots__ = () - - @abstractmethod - def __next__(self): - 'Return the next item from the iterator. 
When exhausted, raise StopIteration' - raise StopIteration - - def __iter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterator: - return _check_methods(C, '__iter__', '__next__') - return NotImplemented - - -Iterator.register(bytes_iterator) -Iterator.register(bytearray_iterator) -#Iterator.register(callable_iterator) -Iterator.register(dict_keyiterator) -Iterator.register(dict_valueiterator) -Iterator.register(dict_itemiterator) -Iterator.register(list_iterator) -Iterator.register(list_reverseiterator) -Iterator.register(range_iterator) -Iterator.register(longrange_iterator) -Iterator.register(set_iterator) -Iterator.register(str_iterator) -Iterator.register(tuple_iterator) -Iterator.register(zip_iterator) - - -class Reversible(Iterable): - - __slots__ = () - - @abstractmethod - def __reversed__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Reversible: - return _check_methods(C, "__reversed__", "__iter__") - return NotImplemented - - -class Generator(Iterator): - - __slots__ = () - - def __next__(self): - """Return the next item from the generator. - When exhausted, raise StopIteration. - """ - return self.send(None) - - @abstractmethod - def send(self, value): - """Send a value into the generator. - Return next yielded value or raise StopIteration. - """ - raise StopIteration - - @abstractmethod - def throw(self, typ, val=None, tb=None): - """Raise an exception in the generator. - Return next yielded value or raise StopIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - def close(self): - """Raise GeneratorExit inside generator. - """ - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError("generator ignored GeneratorExit") - - @classmethod - def __subclasshook__(cls, C): - if cls is Generator: - return _check_methods(C, '__iter__', '__next__', - 'send', 'throw', 'close') - return NotImplemented - - -Generator.register(generator) - - -class Sized(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __len__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Sized: - return _check_methods(C, "__len__") - return NotImplemented - - -class Container(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __contains__(self, x): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Container: - return _check_methods(C, "__contains__") - return NotImplemented - - __class_getitem__ = classmethod(GenericAlias) - - -class Collection(Sized, Iterable, Container): - - __slots__ = () - - @classmethod - def __subclasshook__(cls, C): - if cls is Collection: - return _check_methods(C, "__len__", "__iter__", "__contains__") - return NotImplemented - - -class _CallableGenericAlias(GenericAlias): - """ Represent `Callable[argtypes, resulttype]`. - - This sets ``__args__`` to a tuple containing the flattened ``argtypes`` - followed by ``resulttype``. - - Example: ``Callable[[int, str], float]`` sets ``__args__`` to - ``(int, str, float)``. 
- """ - - __slots__ = () - - def __new__(cls, origin, args): - if not (isinstance(args, tuple) and len(args) == 2): - raise TypeError( - "Callable must be used as Callable[[arg, ...], result].") - t_args, t_result = args - if isinstance(t_args, (tuple, list)): - args = (*t_args, t_result) - elif not _is_param_expr(t_args): - raise TypeError(f"Expected a list of types, an ellipsis, " - f"ParamSpec, or Concatenate. Got {t_args}") - return super().__new__(cls, origin, args) - - def __repr__(self): - if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]): - return super().__repr__() - return (f'collections.abc.Callable' - f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], ' - f'{_type_repr(self.__args__[-1])}]') - - def __reduce__(self): - args = self.__args__ - if not (len(args) == 2 and _is_param_expr(args[0])): - args = list(args[:-1]), args[-1] - return _CallableGenericAlias, (Callable, args) - - def __getitem__(self, item): - # Called during TypeVar substitution, returns the custom subclass - # rather than the default types.GenericAlias object. Most of the - # code is copied from typing's _GenericAlias and the builtin - # types.GenericAlias. - - if not isinstance(item, tuple): - item = (item,) - # A special case in PEP 612 where if X = Callable[P, int], - # then X[int, str] == X[[int, str]]. - if (len(self.__parameters__) == 1 - and _is_param_expr(self.__parameters__[0]) - and item and not _is_param_expr(item[0])): - item = (item,) - - new_args = super().__getitem__(item).__args__ - - # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612 - if not isinstance(new_args[0], (tuple, list)): - t_result = new_args[-1] - t_args = new_args[:-1] - new_args = (t_args, t_result) - return _CallableGenericAlias(Callable, tuple(new_args)) - -def _is_param_expr(obj): - """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or - ``_ConcatenateGenericAlias`` from typing.py - """ - if obj is Ellipsis: - return True - if isinstance(obj, list): - return True - obj = type(obj) - names = ('ParamSpec', '_ConcatenateGenericAlias') - return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names) - -def _type_repr(obj): - """Return the repr() of an object, special-casing types (internal helper). - - Copied from :mod:`typing` since collections.abc - shouldn't depend on that module. - """ - if isinstance(obj, GenericAlias): - return repr(obj) - if isinstance(obj, type): - if obj.__module__ == 'builtins': - return obj.__qualname__ - return f'{obj.__module__}.{obj.__qualname__}' - if obj is Ellipsis: - return '...' - if isinstance(obj, FunctionType): - return obj.__name__ - return repr(obj) - - -class Callable(metaclass=ABCMeta): - - __slots__ = () - - @abstractmethod - def __call__(self, *args, **kwds): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Callable: - return _check_methods(C, "__call__") - return NotImplemented - - __class_getitem__ = classmethod(_CallableGenericAlias) - - -### SETS ### - - -class Set(Collection): - """A set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__ and __len__. - - To override the comparisons (presumably for speed, as the - semantics are fixed), redefine __le__ and __ge__, - then the other operations will automatically follow suit. 
- """ - - __slots__ = () - - def __le__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) > len(other): - return False - for elem in self: - if elem not in other: - return False - return True - - def __lt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) < len(other) and self.__le__(other) - - def __gt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) > len(other) and self.__ge__(other) - - def __ge__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) < len(other): - return False - for elem in other: - if elem not in self: - return False - return True - - def __eq__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) == len(other) and self.__le__(other) - - @classmethod - def _from_iterable(cls, it): - '''Construct an instance of the class from any iterable input. - - Must override this method if the class constructor signature - does not accept an iterable for an input. - ''' - return cls(it) - - def __and__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - return self._from_iterable(value for value in other if value in self) - - __rand__ = __and__ - - def isdisjoint(self, other): - 'Return True if two sets have a null intersection.' - for value in other: - if value in self: - return False - return True - - def __or__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - chain = (e for s in (self, other) for e in s) - return self._from_iterable(chain) - - __ror__ = __or__ - - def __sub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in self - if value not in other) - - def __rsub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in other - if value not in self) - - def __xor__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return (self - other) | (other - self) - - __rxor__ = __xor__ - - def _hash(self): - """Compute the hash value of a set. - - Note that we don't define __hash__: not all sets are hashable. - But if you define a hashable set type, its __hash__ should - call this function. - - This must be compatible __eq__. - - All sets ought to compare equal if they contain the same - elements, regardless of how they are implemented, and - regardless of the order of the elements; so there's not much - freedom for __eq__ or __hash__. We match the algorithm used - by the built-in frozenset type. - """ - MAX = sys.maxsize - MASK = 2 * MAX + 1 - n = len(self) - h = 1927868237 * (n + 1) - h &= MASK - for x in self: - hx = hash(x) - h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 - h &= MASK - h ^= (h >> 11) ^ (h >> 25) - h = h * 69069 + 907133923 - h &= MASK - if h > MAX: - h -= MASK + 1 - if h == -1: - h = 590923713 - return h - - -Set.register(frozenset) - - -class MutableSet(Set): - """A mutable set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__, __len__, - add(), and discard(). 
-
-
-class MutableSet(Set):
-    """A mutable set is a finite, iterable container.
-
-    This class provides concrete generic implementations of all
-    methods except for __contains__, __iter__, __len__,
-    add(), and discard().
-
-    To override the comparisons (presumably for speed, as the
-    semantics are fixed), all you have to do is redefine __le__ and
-    then the other operations will automatically follow suit.
-    """
-
-    __slots__ = ()
-
-    @abstractmethod
-    def add(self, value):
-        """Add an element."""
-        raise NotImplementedError
-
-    @abstractmethod
-    def discard(self, value):
-        """Remove an element.  Do not raise an exception if absent."""
-        raise NotImplementedError
-
-    def remove(self, value):
-        """Remove an element. If not a member, raise a KeyError."""
-        if value not in self:
-            raise KeyError(value)
-        self.discard(value)
-
-    def pop(self):
-        """Return the popped value.  Raise KeyError if empty."""
-        it = iter(self)
-        try:
-            value = next(it)
-        except StopIteration:
-            raise KeyError from None
-        self.discard(value)
-        return value
-
-    def clear(self):
-        """This is slow (creates N new iterators!) but effective."""
-        try:
-            while True:
-                self.pop()
-        except KeyError:
-            pass
-
-    def __ior__(self, it):
-        for value in it:
-            self.add(value)
-        return self
-
-    def __iand__(self, it):
-        for value in (self - it):
-            self.discard(value)
-        return self
-
-    def __ixor__(self, it):
-        if it is self:
-            self.clear()
-        else:
-            if not isinstance(it, Set):
-                it = self._from_iterable(it)
-            for value in it:
-                if value in self:
-                    self.discard(value)
-                else:
-                    self.add(value)
-        return self
-
-    def __isub__(self, it):
-        if it is self:
-            self.clear()
-        else:
-            for value in it:
-                self.discard(value)
-        return self
-
-
-MutableSet.register(set)
-
-
-### MAPPINGS ###
-
-class Mapping(Collection):
-    """A Mapping is a generic container for associating key/value
-    pairs.
-
-    This class provides concrete generic implementations of all
-    methods except for __getitem__, __iter__, and __len__.
-    """
-
-    __slots__ = ()
-
-    # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set.
-    __abc_tpflags__ = 1 << 6  # Py_TPFLAGS_MAPPING
-
-    @abstractmethod
-    def __getitem__(self, key):
-        raise KeyError
-
-    def get(self, key, default=None):
-        'D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.'
-        try:
-            return self[key]
-        except KeyError:
-            return default
-
-    def __contains__(self, key):
-        try:
-            self[key]
-        except KeyError:
-            return False
-        else:
-            return True
-
-    def keys(self):
-        "D.keys() -> a set-like object providing a view on D's keys"
-        return KeysView(self)
-
-    def items(self):
-        "D.items() -> a set-like object providing a view on D's items"
-        return ItemsView(self)
-
-    def values(self):
-        "D.values() -> an object providing a view on D's values"
-        return ValuesView(self)
-
-    def __eq__(self, other):
-        if not isinstance(other, Mapping):
-            return NotImplemented
-        return dict(self.items()) == dict(other.items())
-
-    __reversed__ = None
-
-Mapping.register(mappingproxy)
-
-
-class MappingView(Sized):
-
-    __slots__ = '_mapping',
-
-    def __init__(self, mapping):
-        self._mapping = mapping
-
-    def __len__(self):
-        return len(self._mapping)
-
-    def __repr__(self):
-        return '{0.__class__.__name__}({0._mapping!r})'.format(self)
-
-    __class_getitem__ = classmethod(GenericAlias)
-
-
-class KeysView(MappingView, Set):
-
-    __slots__ = ()
-
-    @classmethod
-    def _from_iterable(cls, it):
-        return set(it)
-
-    def __contains__(self, key):
-        return key in self._mapping
-
-    def __iter__(self):
-        yield from self._mapping
-
-
-KeysView.register(dict_keys)
-
-
-class ItemsView(MappingView, Set):
-
-    __slots__ = ()
-
-    @classmethod
-    def _from_iterable(cls, it):
-        return set(it)
-
-    def __contains__(self, item):
-        key, value = item
-        try:
-            v = self._mapping[key]
-        except KeyError:
-            return False
-        else:
-            return v is value or v == value
-
-    def __iter__(self):
-        for key in self._mapping:
-            yield (key, self._mapping[key])
-
-
-ItemsView.register(dict_items)
-
-
-class ValuesView(MappingView, Collection):
-
-    __slots__ = ()
-
-    def __contains__(self, value):
-        for key in self._mapping:
-            v = self._mapping[key]
-            if v is value or v == value:
-                return True
-        return False
-
-    def __iter__(self):
-        for key in self._mapping:
-            yield self._mapping[key]
-
-
-ValuesView.register(dict_values)
-
-
-class MutableMapping(Mapping):
-    """A MutableMapping is a generic container for associating
-    key/value pairs.
-
-    This class provides concrete generic implementations of all
-    methods except for __getitem__, __setitem__, __delitem__,
-    __iter__, and __len__.
-    """
-
-    __slots__ = ()
-
-    @abstractmethod
-    def __setitem__(self, key, value):
-        raise KeyError
-
-    @abstractmethod
-    def __delitem__(self, key):
-        raise KeyError
-
-    __marker = object()
-
-    def pop(self, key, default=__marker):
-        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
-        If key is not found, d is returned if given, otherwise KeyError is raised.
-        '''
-        try:
-            value = self[key]
-        except KeyError:
-            if default is self.__marker:
-                raise
-            return default
-        else:
-            del self[key]
-            return value
-
-    def popitem(self):
-        '''D.popitem() -> (k, v), remove and return some (key, value) pair
-        as a 2-tuple; but raise KeyError if D is empty.
-        '''
-        try:
-            key = next(iter(self))
-        except StopIteration:
-            raise KeyError from None
-        value = self[key]
-        del self[key]
-        return key, value
-
-    def clear(self):
-        'D.clear() -> None.  Remove all items from D.'
-        try:
-            while True:
-                self.popitem()
-        except KeyError:
-            pass
-
-    def update(self, other=(), /, **kwds):
-        ''' D.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.
-        If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
-        If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
-        In either case, this is followed by: for k, v in F.items(): D[k] = v
-        '''
-        if isinstance(other, Mapping):
-            for key in other:
-                self[key] = other[key]
-        elif hasattr(other, "keys"):
-            for key in other.keys():
-                self[key] = other[key]
-        else:
-            for key, value in other:
-                self[key] = value
-        for key, value in kwds.items():
-            self[key] = value
-
-    def setdefault(self, key, default=None):
-        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
-        try:
-            return self[key]
-        except KeyError:
-            self[key] = default
-        return default
-
-
-MutableMapping.register(dict)
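The practical payoff of this ABC: five special methods buy the whole dict-like API (update, pop, setdefault, the views, and so on). A minimal sketch against the stdlib module; the key-folding behavior is only an example, not anything from this repo:

    from collections.abc import MutableMapping

    class LowerDict(MutableMapping):
        # Case-folds string keys; everything else is inherited mixin code.
        def __init__(self, **kwargs):
            self._data = {}
            self.update(**kwargs)  # inherited MutableMapping.update
        def _key(self, key):
            return key.lower() if isinstance(key, str) else key
        def __getitem__(self, key):
            return self._data[self._key(key)]
        def __setitem__(self, key, value):
            self._data[self._key(key)] = value
        def __delitem__(self, key):
            del self._data[self._key(key)]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    d = LowerDict(Token="abc")
    assert d["TOKEN"] == "abc"
    assert d.setdefault("Token", "x") == "abc"  # mixin method, no override needed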
- """ - - __slots__ = () - - @abstractmethod - def __setitem__(self, index, value): - raise IndexError - - @abstractmethod - def __delitem__(self, index): - raise IndexError - - @abstractmethod - def insert(self, index, value): - 'S.insert(index, value) -- insert value before index' - raise IndexError - - def append(self, value): - 'S.append(value) -- append value to the end of the sequence' - self.insert(len(self), value) - - def clear(self): - 'S.clear() -> None -- remove all items from S' - try: - while True: - self.pop() - except IndexError: - pass - - def reverse(self): - 'S.reverse() -- reverse *IN PLACE*' - n = len(self) - for i in range(n//2): - self[i], self[n-i-1] = self[n-i-1], self[i] - - def extend(self, values): - 'S.extend(iterable) -- extend sequence by appending elements from the iterable' - if values is self: - values = list(values) - for v in values: - self.append(v) - - def pop(self, index=-1): - '''S.pop([index]) -> item -- remove and return item at index (default last). - Raise IndexError if list is empty or index is out of range. - ''' - v = self[index] - del self[index] - return v - - def remove(self, value): - '''S.remove(value) -- remove first occurrence of value. - Raise ValueError if the value is not present. - ''' - del self[self.index(value)] - - def __iadd__(self, values): - self.extend(values) - return self - - -MutableSequence.register(list) -MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/controllers/guidance_ctrl/Lib/_py_abc.py b/controllers/guidance_ctrl/Lib/_py_abc.py deleted file mode 100644 index c870ae90..00000000 --- a/controllers/guidance_ctrl/Lib/_py_abc.py +++ /dev/null @@ -1,147 +0,0 @@ -from _weakrefset import WeakSet - - -def get_cache_token(): - """Returns the current ABC cache token. - - The token is an opaque object (supporting equality testing) identifying the - current version of the ABC cache for virtual subclasses. The token changes - with every call to ``register()`` on any ABC. - """ - return ABCMeta._abc_invalidation_counter - - -class ABCMeta(type): - """Metaclass for defining Abstract Base Classes (ABCs). - - Use this metaclass to create an ABC. An ABC can be subclassed - directly, and then acts as a mix-in class. You can also register - unrelated concrete classes (even built-in classes) and unrelated - ABCs as 'virtual subclasses' -- these and their descendants will - be considered subclasses of the registering ABC by the built-in - issubclass() function, but the registering ABC won't show up in - their MRO (Method Resolution Order) nor will method - implementations defined by the registering ABC be callable (not - even via super()). - """ - - # A global counter that is incremented each time a class is - # registered as a virtual subclass of anything. It forces the - # negative cache to be cleared before its next use. - # Note: this counter is private. Use `abc.get_cache_token()` for - # external code. 
diff --git a/controllers/guidance_ctrl/Lib/_py_abc.py b/controllers/guidance_ctrl/Lib/_py_abc.py
deleted file mode 100644
index c870ae90..00000000
--- a/controllers/guidance_ctrl/Lib/_py_abc.py
+++ /dev/null
@@ -1,147 +0,0 @@
-from _weakrefset import WeakSet
-
-
-def get_cache_token():
-    """Returns the current ABC cache token.
-
-    The token is an opaque object (supporting equality testing) identifying the
-    current version of the ABC cache for virtual subclasses. The token changes
-    with every call to ``register()`` on any ABC.
-    """
-    return ABCMeta._abc_invalidation_counter
-
-
-class ABCMeta(type):
-    """Metaclass for defining Abstract Base Classes (ABCs).
-
-    Use this metaclass to create an ABC. An ABC can be subclassed
-    directly, and then acts as a mix-in class. You can also register
-    unrelated concrete classes (even built-in classes) and unrelated
-    ABCs as 'virtual subclasses' -- these and their descendants will
-    be considered subclasses of the registering ABC by the built-in
-    issubclass() function, but the registering ABC won't show up in
-    their MRO (Method Resolution Order) nor will method
-    implementations defined by the registering ABC be callable (not
-    even via super()).
-    """
-
-    # A global counter that is incremented each time a class is
-    # registered as a virtual subclass of anything. It forces the
-    # negative cache to be cleared before its next use.
-    # Note: this counter is private. Use `abc.get_cache_token()` for
-    # external code.
-    _abc_invalidation_counter = 0
-
-    def __new__(mcls, name, bases, namespace, /, **kwargs):
-        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
-        # Compute set of abstract method names
-        abstracts = {name
-                     for name, value in namespace.items()
-                     if getattr(value, "__isabstractmethod__", False)}
-        for base in bases:
-            for name in getattr(base, "__abstractmethods__", set()):
-                value = getattr(cls, name, None)
-                if getattr(value, "__isabstractmethod__", False):
-                    abstracts.add(name)
-        cls.__abstractmethods__ = frozenset(abstracts)
-        # Set up inheritance registry
-        cls._abc_registry = WeakSet()
-        cls._abc_cache = WeakSet()
-        cls._abc_negative_cache = WeakSet()
-        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
-        return cls
-
-    def register(cls, subclass):
-        """Register a virtual subclass of an ABC.
-
-        Returns the subclass, to allow usage as a class decorator.
-        """
-        if not isinstance(subclass, type):
-            raise TypeError("Can only register classes")
-        if issubclass(subclass, cls):
-            return subclass  # Already a subclass
-        # Subtle: test for cycles *after* testing for "already a subclass";
-        # this means we allow X.register(X) and interpret it as a no-op.
-        if issubclass(cls, subclass):
-            # This would create a cycle, which is bad for the algorithm below
-            raise RuntimeError("Refusing to create an inheritance cycle")
-        cls._abc_registry.add(subclass)
-        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
-        return subclass
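The observable behavior of register() and the invalidation counter, sketched with the stdlib abc module (which fronts either the C _abc module or this pure-Python fallback); the Serializer/JSONish names are made up for the example:

    import abc

    class Serializer(abc.ABC):
        @abc.abstractmethod
        def dumps(self, obj): ...

    class JSONish:  # unrelated class, no inheritance from Serializer
        def dumps(self, obj):
            return str(obj)

    token = abc.get_cache_token()
    Serializer.register(JSONish)               # virtual subclass
    assert issubclass(JSONish, Serializer)
    assert isinstance(JSONish(), Serializer)
    assert abc.get_cache_token() != token      # register() bumped the counter
    assert Serializer not in JSONish.__mro__   # but the MRO is untouched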
-
-    def _dump_registry(cls, file=None):
-        """Debug helper to print the ABC registry."""
-        print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
-        print(f"Inv. counter: {get_cache_token()}", file=file)
-        for name in cls.__dict__:
-            if name.startswith("_abc_"):
-                value = getattr(cls, name)
-                if isinstance(value, WeakSet):
-                    value = set(value)
-                print(f"{name}: {value!r}", file=file)
-
-    def _abc_registry_clear(cls):
-        """Clear the registry (for debugging or testing)."""
-        cls._abc_registry.clear()
-
-    def _abc_caches_clear(cls):
-        """Clear the caches (for debugging or testing)."""
-        cls._abc_cache.clear()
-        cls._abc_negative_cache.clear()
-
-    def __instancecheck__(cls, instance):
-        """Override for isinstance(instance, cls)."""
-        # Inline the cache checking
-        subclass = instance.__class__
-        if subclass in cls._abc_cache:
-            return True
-        subtype = type(instance)
-        if subtype is subclass:
-            if (cls._abc_negative_cache_version ==
-                ABCMeta._abc_invalidation_counter and
-                subclass in cls._abc_negative_cache):
-                return False
-            # Fall back to the subclass check.
-            return cls.__subclasscheck__(subclass)
-        return any(cls.__subclasscheck__(c) for c in (subclass, subtype))
-
-    def __subclasscheck__(cls, subclass):
-        """Override for issubclass(subclass, cls)."""
-        if not isinstance(subclass, type):
-            raise TypeError('issubclass() arg 1 must be a class')
-        # Check cache
-        if subclass in cls._abc_cache:
-            return True
-        # Check negative cache; may have to invalidate
-        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
-            # Invalidate the negative cache
-            cls._abc_negative_cache = WeakSet()
-            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
-        elif subclass in cls._abc_negative_cache:
-            return False
-        # Check the subclass hook
-        ok = cls.__subclasshook__(subclass)
-        if ok is not NotImplemented:
-            assert isinstance(ok, bool)
-            if ok:
-                cls._abc_cache.add(subclass)
-            else:
-                cls._abc_negative_cache.add(subclass)
-            return ok
-        # Check if it's a direct subclass
-        if cls in getattr(subclass, '__mro__', ()):
-            cls._abc_cache.add(subclass)
-            return True
-        # Check if it's a subclass of a registered class (recursive)
-        for rcls in cls._abc_registry:
-            if issubclass(subclass, rcls):
-                cls._abc_cache.add(subclass)
-                return True
-        # Check if it's a subclass of a subclass (recursive)
-        for scls in cls.__subclasses__():
-            if issubclass(subclass, scls):
-                cls._abc_cache.add(subclass)
-                return True
-        # No dice; update negative cache
-        cls._abc_negative_cache.add(subclass)
-        return False
diff --git a/controllers/guidance_ctrl/Lib/_weakrefset.py b/controllers/guidance_ctrl/Lib/_weakrefset.py
deleted file mode 100644
index 2a276843..00000000
--- a/controllers/guidance_ctrl/Lib/_weakrefset.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Access WeakSet through the weakref module.
-# This code is separated-out because it is needed
-# by abc.py to load everything else at startup.
-
-from _weakref import ref
-from types import GenericAlias
-
-__all__ = ['WeakSet']
-
-
-class _IterationGuard:
-    # This context manager registers itself in the current iterators of the
-    # weak container, such as to delay all removals until the context manager
-    # exits.
-    # This technique should be relatively thread-safe (since sets are).
-
-    def __init__(self, weakcontainer):
-        # Don't create cycles
-        self.weakcontainer = ref(weakcontainer)
-
-    def __enter__(self):
-        w = self.weakcontainer()
-        if w is not None:
-            w._iterating.add(self)
-        return self
-
-    def __exit__(self, e, t, b):
-        w = self.weakcontainer()
-        if w is not None:
-            s = w._iterating
-            s.remove(self)
-            if not s:
-                w._commit_removals()
-
-
-class WeakSet:
-    def __init__(self, data=None):
-        self.data = set()
-        def _remove(item, selfref=ref(self)):
-            self = selfref()
-            if self is not None:
-                if self._iterating:
-                    self._pending_removals.append(item)
-                else:
-                    self.data.discard(item)
-        self._remove = _remove
-        # A list of keys to be removed
-        self._pending_removals = []
-        self._iterating = set()
-        if data is not None:
-            self.update(data)
-
-    def _commit_removals(self):
-        pop = self._pending_removals.pop
-        discard = self.data.discard
-        while True:
-            try:
-                item = pop()
-            except IndexError:
-                return
-            discard(item)
-
-    def __iter__(self):
-        with _IterationGuard(self):
-            for itemref in self.data:
-                item = itemref()
-                if item is not None:
-                    # Caveat: the iterator will keep a strong reference to
-                    # `item` until it is resumed or closed.
-                    yield item
-
-    def __len__(self):
-        return len(self.data) - len(self._pending_removals)
-
-    def __contains__(self, item):
-        try:
-            wr = ref(item)
-        except TypeError:
-            return False
-        return wr in self.data
-
-    def __reduce__(self):
-        return (self.__class__, (list(self),),
-                getattr(self, '__dict__', None))
-
-    def add(self, item):
-        if self._pending_removals:
-            self._commit_removals()
-        self.data.add(ref(item, self._remove))
-
-    def clear(self):
-        if self._pending_removals:
-            self._commit_removals()
-        self.data.clear()
-
-    def copy(self):
-        return self.__class__(self)
-
-    def pop(self):
-        if self._pending_removals:
-            self._commit_removals()
-        while True:
-            try:
-                itemref = self.data.pop()
-            except KeyError:
-                raise KeyError('pop from empty WeakSet') from None
-            item = itemref()
-            if item is not None:
-                return item
-
-    def remove(self, item):
-        if self._pending_removals:
-            self._commit_removals()
-        self.data.remove(ref(item))
-
-    def discard(self, item):
-        if self._pending_removals:
-            self._commit_removals()
-        self.data.discard(ref(item))
-
-    def update(self, other):
-        if self._pending_removals:
-            self._commit_removals()
-        for element in other:
-            self.add(element)
-
-    def __ior__(self, other):
-        self.update(other)
-        return self
-
-    def difference(self, other):
-        newset = self.copy()
-        newset.difference_update(other)
-        return newset
-    __sub__ = difference
-
-    def difference_update(self, other):
-        self.__isub__(other)
-    def __isub__(self, other):
-        if self._pending_removals:
-            self._commit_removals()
-        if self is other:
-            self.data.clear()
-        else:
-            self.data.difference_update(ref(item) for item in other)
-        return self
-
-    def intersection(self, other):
-        return self.__class__(item for item in other if item in self)
-    __and__ = intersection
-
-    def intersection_update(self, other):
-        self.__iand__(other)
-    def __iand__(self, other):
-        if self._pending_removals:
-            self._commit_removals()
-        self.data.intersection_update(ref(item) for item in other)
-        return self
-
-    def issubset(self, other):
-        return self.data.issubset(ref(item) for item in other)
-    __le__ = issubset
-
-    def __lt__(self, other):
-        return self.data < set(map(ref, other))
-
-    def issuperset(self, other):
-        return self.data.issuperset(ref(item) for item in other)
-    __ge__ = issuperset
-
-    def __gt__(self, other):
-        return self.data > set(map(ref, other))
-
-    def __eq__(self, other):
-        if not isinstance(other, self.__class__):
-            return NotImplemented
-        return self.data == set(map(ref, other))
-
-    def symmetric_difference(self, other):
-        newset = self.copy()
-        newset.symmetric_difference_update(other)
-        return newset
-    __xor__ = symmetric_difference
-
-    def symmetric_difference_update(self, other):
-        self.__ixor__(other)
-    def __ixor__(self, other):
-        if self._pending_removals:
-            self._commit_removals()
-        if self is other:
-            self.data.clear()
-        else:
-            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
-        return self
-
-    def union(self, other):
-        return self.__class__(e for s in (self, other) for e in s)
-    __or__ = union
-
-    def isdisjoint(self, other):
-        return len(self.intersection(other)) == 0
-
-    def __repr__(self):
-        return repr(self.data)
-
-    __class_getitem__ = classmethod(GenericAlias)
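The contract all of that bookkeeping upholds: members disappear from the set as soon as their last strong reference dies. Demonstrated against the stdlib weakref.WeakSet (the Session class is just a placeholder):

    import gc
    import weakref

    class Session:
        pass

    live = weakref.WeakSet()
    s1, s2 = Session(), Session()
    live.add(s1)
    live.add(s2)
    assert len(live) == 2

    del s1        # drop the only strong reference
    gc.collect()  # make collection deterministic off CPython too
    assert len(live) == 1 and s2 in live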
diff --git a/controllers/guidance_ctrl/Lib/abc.py b/controllers/guidance_ctrl/Lib/abc.py
deleted file mode 100644
index bfccab2d..00000000
--- a/controllers/guidance_ctrl/Lib/abc.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# Copyright 2007 Google, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Abstract Base Classes (ABCs) according to PEP 3119."""
-
-
-def abstractmethod(funcobj):
-    """A decorator indicating abstract methods.
-
-    Requires that the metaclass is ABCMeta or derived from it. A
-    class that has a metaclass derived from ABCMeta cannot be
-    instantiated unless all of its abstract methods are overridden.
-    The abstract methods can be called using any of the normal
-    'super' call mechanisms. abstractmethod() may be used to declare
-    abstract methods for properties and descriptors.
-
-    Usage:
-
-        class C(metaclass=ABCMeta):
-            @abstractmethod
-            def my_abstract_method(self, ...):
-                ...
-    """
-    funcobj.__isabstractmethod__ = True
-    return funcobj
-
-
-class abstractclassmethod(classmethod):
-    """A decorator indicating abstract classmethods.
-
-    Deprecated, use 'classmethod' with 'abstractmethod' instead:
-
-        class C(ABC):
-            @classmethod
-            @abstractmethod
-            def my_abstract_classmethod(cls, ...):
-                ...
-
-    """
-
-    __isabstractmethod__ = True
-
-    def __init__(self, callable):
-        callable.__isabstractmethod__ = True
-        super().__init__(callable)
-
-
-class abstractstaticmethod(staticmethod):
-    """A decorator indicating abstract staticmethods.
-
-    Deprecated, use 'staticmethod' with 'abstractmethod' instead:
-
-        class C(ABC):
-            @staticmethod
-            @abstractmethod
-            def my_abstract_staticmethod(...):
-                ...
-
-    """
-
-    __isabstractmethod__ = True
-
-    def __init__(self, callable):
-        callable.__isabstractmethod__ = True
-        super().__init__(callable)
-
-
-class abstractproperty(property):
-    """A decorator indicating abstract properties.
-
-    Deprecated, use 'property' with 'abstractmethod' instead:
-
-        class C(ABC):
-            @property
-            @abstractmethod
-            def my_abstract_property(self):
-                ...
-
-    """
-
-    __isabstractmethod__ = True
-
-
-try:
-    from _abc import (get_cache_token, _abc_init, _abc_register,
-                      _abc_instancecheck, _abc_subclasscheck, _get_dump,
-                      _reset_registry, _reset_caches)
-# TODO: RUSTPYTHON missing _abc module implementation.
-except ModuleNotFoundError:
-    from _py_abc import ABCMeta, get_cache_token
-    ABCMeta.__module__ = 'abc'
-except ImportError:
-    from _py_abc import ABCMeta, get_cache_token
-    ABCMeta.__module__ = 'abc'
-else:
-    class ABCMeta(type):
-        """Metaclass for defining Abstract Base Classes (ABCs).
-
-        Use this metaclass to create an ABC. An ABC can be subclassed
-        directly, and then acts as a mix-in class. You can also register
-        unrelated concrete classes (even built-in classes) and unrelated
-        ABCs as 'virtual subclasses' -- these and their descendants will
-        be considered subclasses of the registering ABC by the built-in
-        issubclass() function, but the registering ABC won't show up in
-        their MRO (Method Resolution Order) nor will method
-        implementations defined by the registering ABC be callable (not
-        even via super()).
-        """
-        def __new__(mcls, name, bases, namespace, /, **kwargs):
-            cls = super().__new__(mcls, name, bases, namespace, **kwargs)
-            _abc_init(cls)
-            return cls
-
-        def register(cls, subclass):
-            """Register a virtual subclass of an ABC.
-
-            Returns the subclass, to allow usage as a class decorator.
-            """
-            return _abc_register(cls, subclass)
-
-        def __instancecheck__(cls, instance):
-            """Override for isinstance(instance, cls)."""
-            return _abc_instancecheck(cls, instance)
-
-        def __subclasscheck__(cls, subclass):
-            """Override for issubclass(subclass, cls)."""
-            return _abc_subclasscheck(cls, subclass)
-
-        def _dump_registry(cls, file=None):
-            """Debug helper to print the ABC registry."""
-            print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
-            print(f"Inv. counter: {get_cache_token()}", file=file)
-            (_abc_registry, _abc_cache, _abc_negative_cache,
-             _abc_negative_cache_version) = _get_dump(cls)
-            print(f"_abc_registry: {_abc_registry!r}", file=file)
-            print(f"_abc_cache: {_abc_cache!r}", file=file)
-            print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file)
-            print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}",
-                  file=file)
-
-        def _abc_registry_clear(cls):
-            """Clear the registry (for debugging or testing)."""
-            _reset_registry(cls)
-
-        def _abc_caches_clear(cls):
-            """Clear the caches (for debugging or testing)."""
-            _reset_caches(cls)
-
-
-def update_abstractmethods(cls):
-    """Recalculate the set of abstract methods of an abstract class.
-
-    If a class has had one of its abstract methods implemented after the
-    class was created, the method will not be considered implemented until
-    this function is called. Alternatively, if a new abstract method has been
-    added to the class, it will only be considered an abstract method of the
-    class after this function is called.
-
-    This function should be called before any use is made of the class,
-    usually in class decorators that add methods to the subject class.
-
-    Returns cls, to allow usage as a class decorator.
-
-    If cls is not an instance of ABCMeta, does nothing.
-    """
-    if not hasattr(cls, '__abstractmethods__'):
-        # We check for __abstractmethods__ here because cls might by a C
-        # implementation or a python implementation (especially during
-        # testing), and we want to handle both cases.
-        return cls
-
-    abstracts = set()
-    # Check the existing abstract methods of the parents, keep only the ones
-    # that are not implemented.
-    for scls in cls.__bases__:
-        for name in getattr(scls, '__abstractmethods__', ()):
-            value = getattr(cls, name, None)
-            if getattr(value, "__isabstractmethod__", False):
-                abstracts.add(name)
-    # Also add any other newly added abstract methods.
-    for name, value in cls.__dict__.items():
-        if getattr(value, "__isabstractmethod__", False):
-            abstracts.add(name)
-    cls.__abstractmethods__ = frozenset(abstracts)
-    return cls
-
-
-class ABC(metaclass=ABCMeta):
-    """Helper class that provides a standard way to create an ABC using
-    inheritance.
-    """
-    __slots__ = ()
diff --git a/controllers/guidance_ctrl/Lib/collections/__init__.py b/controllers/guidance_ctrl/Lib/collections/__init__.py
deleted file mode 100644
index 59a2d520..00000000
--- a/controllers/guidance_ctrl/Lib/collections/__init__.py
+++ /dev/null
@@ -1,1578 +0,0 @@
-'''This module implements specialized container datatypes providing
-alternatives to Python's general purpose built-in containers, dict,
-list, set, and tuple.
-
-* namedtuple   factory function for creating tuple subclasses with named fields
-* deque        list-like container with fast appends and pops on either end
-* ChainMap     dict-like class for creating a single view of multiple mappings
-* Counter      dict subclass for counting hashable objects
-* OrderedDict  dict subclass that remembers the order entries were added
-* defaultdict  dict subclass that calls a factory function to supply missing values
-* UserDict     wrapper around dictionary objects for easier dict subclassing
-* UserList     wrapper around list objects for easier list subclassing
-* UserString   wrapper around string objects for easier string subclassing
-
-'''
-
-__all__ = [
-    'ChainMap',
-    'Counter',
-    'OrderedDict',
-    'UserDict',
-    'UserList',
-    'UserString',
-    'defaultdict',
-    'deque',
-    'namedtuple',
-]
-
-import _collections_abc
-import sys as _sys
-
-from itertools import chain as _chain
-from itertools import repeat as _repeat
-from itertools import starmap as _starmap
-from keyword import iskeyword as _iskeyword
-from operator import eq as _eq
-from operator import itemgetter as _itemgetter
-from reprlib import recursive_repr as _recursive_repr
-from _weakref import proxy as _proxy
-
-try:
-    from _collections import deque
-except ImportError:
-    pass
-else:
-    _collections_abc.MutableSequence.register(deque)
-
-try:
-    from _collections import defaultdict
-except ImportError:
-    # FIXME: try to implement defaultdict in collections.rs rather than in Python
-    # I (coolreader18) couldn't figure out some class stuff with __new__ and
-    # __init__ and __missing__ and subclassing built-in types from Rust, so I went
-    # with this instead.
-    from ._defaultdict import defaultdict
-
-
-################################################################################
-### OrderedDict
-################################################################################
-
-class _OrderedDictKeysView(_collections_abc.KeysView):
-
-    def __reversed__(self):
-        yield from reversed(self._mapping)
-
-class _OrderedDictItemsView(_collections_abc.ItemsView):
-
-    def __reversed__(self):
-        for key in reversed(self._mapping):
-            yield (key, self._mapping[key])
-
-class _OrderedDictValuesView(_collections_abc.ValuesView):
-
-    def __reversed__(self):
-        for key in reversed(self._mapping):
-            yield self._mapping[key]
-
-class _Link(object):
-    __slots__ = 'prev', 'next', 'key', '__weakref__'
-
-class OrderedDict(dict):
-    'Dictionary that remembers insertion order'
-    # An inherited dict maps keys to values.
-    # The inherited dict provides __getitem__, __len__, __contains__, and get.
-    # The remaining methods are order-aware.
-    # Big-O running times for all methods are the same as regular dictionaries.
-
-    # The internal self.__map dict maps keys to links in a doubly linked list.
-    # The circular doubly linked list starts and ends with a sentinel element.
-    # The sentinel element never gets deleted (this simplifies the algorithm).
-    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
-    # The prev links are weakref proxies (to prevent circular references).
-    # Individual links are kept alive by the hard reference in self.__map.
-    # Those hard references disappear when a key is deleted from an OrderedDict.
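What that linked-list layout buys, seen from the caller's side before the methods that implement it: O(1) reordering and popping from either end. Sketched with the stdlib collections.OrderedDict, which this pure-Python version mirrors:

    from collections import OrderedDict

    od = OrderedDict.fromkeys("abc")
    od.move_to_end("a")                  # relink one node at the tail: O(1)
    assert list(od) == ["b", "c", "a"]
    od.move_to_end("c", last=False)      # or at the head
    assert list(od) == ["c", "b", "a"]
    assert od.popitem(last=False) == ("c", None)  # FIFO pop from the head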
- ''' - try: - self.__root - except AttributeError: - self.__hardroot = _Link() - self.__root = root = _proxy(self.__hardroot) - root.prev = root.next = root - self.__map = {} - self.__update(other, **kwds) - - def __setitem__(self, key, value, - dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link at the end of the linked list, - # and the inherited dictionary is updated with the new key/value pair. - if key not in self: - self.__map[key] = link = Link() - root = self.__root - last = root.prev - link.prev, link.next, link.key = last, root, key - last.next = link - root.prev = proxy(link) - dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which gets - # removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link = self.__map.pop(key) - link_prev = link.prev - link_next = link.next - link_prev.next = link_next - link_next.prev = link_prev - link.prev = None - link.next = None - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - # Traverse the linked list in order. - root = self.__root - curr = root.next - while curr is not root: - yield curr.key - curr = curr.next - - def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - # Traverse the linked list in reverse order. - root = self.__root - curr = root.prev - while curr is not root: - yield curr.key - curr = curr.prev - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - root = self.__root - root.prev = root.next = root - self.__map.clear() - dict.clear(self) - - def popitem(self, last=True): - '''Remove and return a (key, value) pair from the dictionary. - - Pairs are returned in LIFO order if last is true or FIFO order if false. - ''' - if not self: - raise KeyError('dictionary is empty') - root = self.__root - if last: - link = root.prev - link_prev = link.prev - link_prev.next = root - root.prev = link_prev - else: - link = root.next - link_next = link.next - root.next = link_next - link_next.prev = root - key = link.key - del self.__map[key] - value = dict.pop(self, key) - return key, value - - def move_to_end(self, key, last=True): - '''Move an existing element to the end (or beginning if last is false). - - Raise KeyError if the element does not exist. 
- ''' - link = self.__map[key] - link_prev = link.prev - link_next = link.next - soft_link = link_next.prev - link_prev.next = link_next - link_next.prev = link_prev - root = self.__root - if last: - last = root.prev - link.prev = last - link.next = root - root.prev = soft_link - last.next = link - else: - first = root.next - link.prev = root - link.next = first - first.prev = soft_link - root.next = link - - def __sizeof__(self): - sizeof = _sys.getsizeof - n = len(self) + 1 # number of links including root - size = sizeof(self.__dict__) # instance dictionary - size += sizeof(self.__map) * 2 # internal dict and inherited dict - size += sizeof(self.__hardroot) * n # link objects - size += sizeof(self.__root) * n # proxy objects - return size - - update = __update = _collections_abc.MutableMapping.update - - def keys(self): - "D.keys() -> a set-like object providing a view on D's keys" - return _OrderedDictKeysView(self) - - def items(self): - "D.items() -> a set-like object providing a view on D's items" - return _OrderedDictItemsView(self) - - def values(self): - "D.values() -> an object providing a view on D's values" - return _OrderedDictValuesView(self) - - __ne__ = _collections_abc.MutableMapping.__ne__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding - value. If key is not found, d is returned if given, otherwise KeyError - is raised. - - ''' - marker = self.__marker - result = dict.pop(self, key, marker) - if result is not marker: - # The same as in __delitem__(). - link = self.__map.pop(key) - link_prev = link.prev - link_next = link.next - link_prev.next = link_next - link_next.prev = link_prev - link.prev = None - link.next = None - return result - if default is marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - '''Insert key with a value of default if key is not in the dictionary. - - Return the value for key if key is in the dictionary, else default. - ''' - if key in self: - return self[key] - self[key] = default - return default - - @_recursive_repr() - def __repr__(self): - 'od.__repr__() <==> repr(od)' - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, list(self.items())) - - def __reduce__(self): - 'Return state information for pickling' - state = self.__getstate__() - if state: - if isinstance(state, tuple): - state, slots = state - else: - slots = {} - state = state.copy() - slots = slots.copy() - for k in vars(OrderedDict()): - state.pop(k, None) - slots.pop(k, None) - if slots: - state = state, slots - else: - state = state or None - return self.__class__, (), state, None, iter(self.items()) - - def copy(self): - 'od.copy() -> a shallow copy of od' - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - '''Create a new ordered dictionary with keys from iterable and values set to value. - ''' - self = cls() - for key in iterable: - self[key] = value - return self - - def __eq__(self, other): - '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive - while comparison to a regular mapping is order-insensitive. 
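-
- For example:
-
- >>> OrderedDict(a=1, b=2) == OrderedDict(b=2, a=1)
- False
- >>> OrderedDict(a=1, b=2) == dict(b=2, a=1)
- True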
- - ''' - if isinstance(other, OrderedDict): - return dict.__eq__(self, other) and all(map(_eq, self, other)) - return dict.__eq__(self, other) - - def __ior__(self, other): - self.update(other) - return self - - def __or__(self, other): - if not isinstance(other, dict): - return NotImplemented - new = self.__class__(self) - new.update(other) - return new - - def __ror__(self, other): - if not isinstance(other, dict): - return NotImplemented - new = self.__class__(other) - new.update(self) - return new - - -try: - from _collections import OrderedDict -except ImportError: - # Leave the pure Python version in place. - pass - - -################################################################################ -### namedtuple -################################################################################ - -try: - from _collections import _tuplegetter -except ImportError: - _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) - -def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): - """Returns a new subclass of tuple with named fields. - - >>> Point = namedtuple('Point', ['x', 'y']) - >>> Point.__doc__ # docstring for the new class - 'Point(x, y)' - >>> p = Point(11, y=22) # instantiate with positional args or keywords - >>> p[0] + p[1] # indexable like a plain tuple - 33 - >>> x, y = p # unpack like a regular tuple - >>> x, y - (11, 22) - >>> p.x + p.y # fields also accessible by name - 33 - >>> d = p._asdict() # convert to a dictionary - >>> d['x'] - 11 - >>> Point(**d) # convert from a dictionary - Point(x=11, y=22) - >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields - Point(x=100, y=22) - - """ - - # Validate the field names. At the user's option, either generate an error - # message or automatically replace the field name with a valid name. 
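- # For instance, namedtuple('T', ['abc', 'def', 'abc'], rename=True)
- # yields the fields ('abc', '_1', '_2'): 'def' is a keyword and the
- # second 'abc' is a duplicate, so both are renamed positionally.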
- if isinstance(field_names, str): - field_names = field_names.replace(',', ' ').split() - field_names = list(map(str, field_names)) - typename = _sys.intern(str(typename)) - - if rename: - seen = set() - for index, name in enumerate(field_names): - if (not name.isidentifier() - or _iskeyword(name) - or name.startswith('_') - or name in seen): - field_names[index] = f'_{index}' - seen.add(name) - - for name in [typename] + field_names: - if type(name) is not str: - raise TypeError('Type names and field names must be strings') - if not name.isidentifier(): - raise ValueError('Type names and field names must be valid ' - f'identifiers: {name!r}') - if _iskeyword(name): - raise ValueError('Type names and field names cannot be a ' - f'keyword: {name!r}') - - seen = set() - for name in field_names: - if name.startswith('_') and not rename: - raise ValueError('Field names cannot start with an underscore: ' - f'{name!r}') - if name in seen: - raise ValueError(f'Encountered duplicate field name: {name!r}') - seen.add(name) - - field_defaults = {} - if defaults is not None: - defaults = tuple(defaults) - if len(defaults) > len(field_names): - raise TypeError('Got more default values than field names') - field_defaults = dict(reversed(list(zip(reversed(field_names), - reversed(defaults))))) - - # Variables used in the methods and docstrings - field_names = tuple(map(_sys.intern, field_names)) - num_fields = len(field_names) - arg_list = ', '.join(field_names) - if num_fields == 1: - arg_list += ',' - repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' - tuple_new = tuple.__new__ - _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip - - # Create all the named tuple methods to be added to the class namespace - - namespace = { - '_tuple_new': tuple_new, - '__builtins__': {}, - '__name__': f'namedtuple_{typename}', - } - code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))' - __new__ = eval(code, namespace) - __new__.__name__ = '__new__' - __new__.__doc__ = f'Create new instance of {typename}({arg_list})' - if defaults is not None: - __new__.__defaults__ = defaults - - @classmethod - def _make(cls, iterable): - result = tuple_new(cls, iterable) - if _len(result) != num_fields: - raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') - return result - - _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' - 'or iterable') - - def _replace(self, /, **kwds): - result = self._make(_map(kwds.pop, field_names, self)) - if kwds: - raise ValueError(f'Got unexpected field names: {list(kwds)!r}') - return result - - _replace.__doc__ = (f'Return a new {typename} object replacing specified ' - 'fields with new values') - - def __repr__(self): - 'Return a nicely formatted representation string' - return self.__class__.__name__ + repr_fmt % self - - def _asdict(self): - 'Return a new dict which maps field names to their values.' - return _dict(_zip(self._fields, self)) - - def __getnewargs__(self): - 'Return self as a plain tuple. Used by copy and pickle.' 
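- # A plain tuple suffices here: pickle passes it back to __new__ as
- # positional arguments when rebuilding the instance.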
- return _tuple(self) - - # Modify function metadata to help with introspection and debugging - for method in ( - __new__, - _make.__func__, - _replace, - __repr__, - _asdict, - __getnewargs__, - ): - method.__qualname__ = f'{typename}.{method.__name__}' - - # Build-up the class namespace dictionary - # and use type() to build the result class - class_namespace = { - '__doc__': f'{typename}({arg_list})', - '__slots__': (), - '_fields': field_names, - '_field_defaults': field_defaults, - '__new__': __new__, - '_make': _make, - '_replace': _replace, - '__repr__': __repr__, - '_asdict': _asdict, - '__getnewargs__': __getnewargs__, - '__match_args__': field_names, - } - for index, name in enumerate(field_names): - doc = _sys.intern(f'Alias for field number {index}') - class_namespace[name] = _tuplegetter(index, doc) - - result = type(typename, (tuple,), class_namespace) - - # For pickling to work, the __module__ variable needs to be set to the frame - # where the named tuple is created. Bypass this step in environments where - # sys._getframe is not defined (Jython for example) or sys._getframe is not - # defined for arguments greater than 0 (IronPython), or where the user has - # specified a particular module. - if module is None: - try: - module = _sys._getframe(1).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): - pass - if module is not None: - result.__module__ = module - - return result - - -######################################################################## -### Counter -######################################################################## - -def _count_elements(mapping, iterable): - 'Tally elements from the iterable.' - mapping_get = mapping.get - for elem in iterable: - mapping[elem] = mapping_get(elem, 0) + 1 - -try: # Load C helper function if available - from _collections import _count_elements -except ImportError: - pass - -class Counter(dict): - '''Dict subclass for counting hashable items. Sometimes called a bag - or multiset. Elements are stored as dictionary keys and their counts - are stored as dictionary values. - - >>> c = Counter('abcdeabcdabcaba') # count elements from a string - - >>> c.most_common(3) # three most common elements - [('a', 5), ('b', 4), ('c', 3)] - >>> sorted(c) # list all unique elements - ['a', 'b', 'c', 'd', 'e'] - >>> ''.join(sorted(c.elements())) # list elements with repetitions - 'aaaaabbbbcccdde' - >>> sum(c.values()) # total of all counts - 15 - - >>> c['a'] # count of letter 'a' - 5 - >>> for elem in 'shazam': # update counts from an iterable - ... 
c[elem] += 1 # by adding 1 to each element's count - >>> c['a'] # now there are seven 'a' - 7 - >>> del c['b'] # remove all 'b' - >>> c['b'] # now there are zero 'b' - 0 - - >>> d = Counter('simsalabim') # make another counter - >>> c.update(d) # add in the second counter - >>> c['a'] # now there are nine 'a' - 9 - - >>> c.clear() # empty the counter - >>> c - Counter() - - Note: If a count is set to zero or reduced to zero, it will remain - in the counter until the entry is deleted or the counter is cleared: - - >>> c = Counter('aaabbc') - >>> c['b'] -= 2 # reduce the count of 'b' by two - >>> c.most_common() # 'b' is still in, but its count is zero - [('a', 3), ('c', 1), ('b', 0)] - - ''' - # References: - # http://en.wikipedia.org/wiki/Multiset - # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html - # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm - # http://code.activestate.com/recipes/259174/ - # Knuth, TAOCP Vol. II section 4.6.3 - - def __init__(self, iterable=None, /, **kwds): - '''Create a new, empty Counter object. And if given, count elements - from an input iterable. Or, initialize the count from another mapping - of elements to their counts. - - >>> c = Counter() # a new, empty counter - >>> c = Counter('gallahad') # a new counter from an iterable - >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping - >>> c = Counter(a=4, b=2) # a new counter from keyword args - - ''' - super().__init__() - self.update(iterable, **kwds) - - def __missing__(self, key): - 'The count of elements not in the Counter is zero.' - # Needed so that self[missing_item] does not raise KeyError - return 0 - - def total(self): - 'Sum of the counts' - return sum(self.values()) - - def most_common(self, n=None): - '''List the n most common elements and their counts from the most - common to the least. If n is None, then list all element counts. - - >>> Counter('abracadabra').most_common(3) - [('a', 5), ('b', 2), ('r', 2)] - - ''' - # Emulate Bag.sortedByCount from Smalltalk - if n is None: - return sorted(self.items(), key=_itemgetter(1), reverse=True) - - # Lazy import to speedup Python startup time - import heapq - return heapq.nlargest(n, self.items(), key=_itemgetter(1)) - - def elements(self): - '''Iterator over elements repeating each as many times as its count. - - >>> c = Counter('ABCABC') - >>> sorted(c.elements()) - ['A', 'A', 'B', 'B', 'C', 'C'] - - # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 - >>> import math - >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) - >>> math.prod(prime_factors.elements()) - 1836 - - Note, if an element's count has been set to zero or is a negative - number, elements() will ignore it. - - ''' - # Emulate Bag.do from Smalltalk and Multiset.begin from C++. - return _chain.from_iterable(_starmap(_repeat, self.items())) - - # Override dict methods where necessary - - @classmethod - def fromkeys(cls, iterable, v=None): - # There is no equivalent method for counters because the semantics - # would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2). - # Initializing counters to zero values isn't necessary because zero - # is already the default value for counter lookups. Initializing - # to one is easily accomplished with Counter(set(iterable)). For - # more exotic cases, create a dictionary first using a dictionary - # comprehension or dict.fromkeys(). - raise NotImplementedError( - 'Counter.fromkeys() is undefined. 
Use Counter(iterable) instead.') - - def update(self, iterable=None, /, **kwds): - '''Like dict.update() but adds counts instead of replacing them. - - Source can be an iterable, a dictionary, or another Counter instance. - - >>> c = Counter('which') - >>> c.update('witch') # add elements from another iterable - >>> d = Counter('watch') - >>> c.update(d) # add elements from another counter - >>> c['h'] # four 'h' in which, witch, and watch - 4 - - ''' - # The regular dict.update() operation makes no sense here because the - # replace behavior results in some of the original untouched counts - # being mixed in with all of the other counts for a mishmash that - # doesn't have a straightforward interpretation in most counting - # contexts. Instead, we implement straight-addition. Both the inputs - # and outputs are allowed to contain zero and negative counts. - - if iterable is not None: - if isinstance(iterable, _collections_abc.Mapping): - if self: - self_get = self.get - for elem, count in iterable.items(): - self[elem] = count + self_get(elem, 0) - else: - # fast path when counter is empty - super().update(iterable) - else: - _count_elements(self, iterable) - if kwds: - self.update(kwds) - - def subtract(self, iterable=None, /, **kwds): - '''Like dict.update() but subtracts counts instead of replacing them. - Counts can be reduced below zero. Both the inputs and outputs are - allowed to contain zero and negative counts. - - Source can be an iterable, a dictionary, or another Counter instance. - - >>> c = Counter('which') - >>> c.subtract('witch') # subtract elements from another iterable - >>> c.subtract(Counter('watch')) # subtract elements from another counter - >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch - 0 - >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch - -1 - - ''' - if iterable is not None: - self_get = self.get - if isinstance(iterable, _collections_abc.Mapping): - for elem, count in iterable.items(): - self[elem] = self_get(elem, 0) - count - else: - for elem in iterable: - self[elem] = self_get(elem, 0) - 1 - if kwds: - self.subtract(kwds) - - def copy(self): - 'Return a shallow copy.' - return self.__class__(self) - - def __reduce__(self): - return self.__class__, (dict(self),) - - def __delitem__(self, elem): - 'Like dict.__delitem__() but does not raise KeyError for missing values.' - if elem in self: - super().__delitem__(elem) - - def __repr__(self): - if not self: - return f'{self.__class__.__name__}()' - try: - # dict() preserves the ordering returned by most_common() - d = dict(self.most_common()) - except TypeError: - # handle case where values are not orderable - d = dict(self) - return f'{self.__class__.__name__}({d!r})' - - # Multiset-style mathematical operations discussed in: - # Knuth TAOCP Volume II section 4.6.3 exercise 19 - # and at http://en.wikipedia.org/wiki/Multiset - # - # Outputs guaranteed to only include positive counts. - # - # To strip negative and zero counts, add-in an empty counter: - # c += Counter() - # - # Results are ordered according to when an element is first - # encountered in the left operand and then by the order - # encountered in the right operand. - # - # When the multiplicities are all zero or one, multiset operations - # are guaranteed to be equivalent to the corresponding operations - # for regular sets. 
- # Given counter multisets such as: - # cp = Counter(a=1, b=0, c=1) - # cq = Counter(c=1, d=0, e=1) - # The corresponding regular sets would be: - # sp = {'a', 'c'} - # sq = {'c', 'e'} - # All of the following relations would hold: - # set(cp + cq) == sp | sq - # set(cp - cq) == sp - sq - # set(cp | cq) == sp | sq - # set(cp & cq) == sp & sq - # (cp == cq) == (sp == sq) - # (cp != cq) == (sp != sq) - # (cp <= cq) == (sp <= sq) - # (cp < cq) == (sp < sq) - # (cp >= cq) == (sp >= sq) - # (cp > cq) == (sp > sq) - - def __eq__(self, other): - 'True if all counts agree. Missing counts are treated as zero.' - if not isinstance(other, Counter): - return NotImplemented - return all(self[e] == other[e] for c in (self, other) for e in c) - - def __ne__(self, other): - 'True if any counts disagree. Missing counts are treated as zero.' - if not isinstance(other, Counter): - return NotImplemented - return not self == other - - def __le__(self, other): - 'True if all counts in self are a subset of those in other.' - if not isinstance(other, Counter): - return NotImplemented - return all(self[e] <= other[e] for c in (self, other) for e in c) - - def __lt__(self, other): - 'True if all counts in self are a proper subset of those in other.' - if not isinstance(other, Counter): - return NotImplemented - return self <= other and self != other - - def __ge__(self, other): - 'True if all counts in self are a superset of those in other.' - if not isinstance(other, Counter): - return NotImplemented - return all(self[e] >= other[e] for c in (self, other) for e in c) - - def __gt__(self, other): - 'True if all counts in self are a proper superset of those in other.' - if not isinstance(other, Counter): - return NotImplemented - return self >= other and self != other - - def __add__(self, other): - '''Add counts from two counters. - - >>> Counter('abbb') + Counter('bcc') - Counter({'b': 4, 'c': 2, 'a': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - newcount = count + other[elem] - if newcount > 0: - result[elem] = newcount - for elem, count in other.items(): - if elem not in self and count > 0: - result[elem] = count - return result - - def __sub__(self, other): - ''' Subtract count, but keep only results with positive counts. - - >>> Counter('abbbc') - Counter('bccd') - Counter({'b': 2, 'a': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - newcount = count - other[elem] - if newcount > 0: - result[elem] = newcount - for elem, count in other.items(): - if elem not in self and count < 0: - result[elem] = 0 - count - return result - - def __or__(self, other): - '''Union is the maximum of value in either of the input counters. - - >>> Counter('abbb') | Counter('bcc') - Counter({'b': 3, 'c': 2, 'a': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - other_count = other[elem] - newcount = other_count if count < other_count else count - if newcount > 0: - result[elem] = newcount - for elem, count in other.items(): - if elem not in self and count > 0: - result[elem] = count - return result - - def __and__(self, other): - ''' Intersection is the minimum of corresponding counts. 
- - >>> Counter('abbb') & Counter('bcc') - Counter({'b': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - other_count = other[elem] - newcount = count if count < other_count else other_count - if newcount > 0: - result[elem] = newcount - return result - - def __pos__(self): - 'Adds an empty counter, effectively stripping negative and zero counts' - result = Counter() - for elem, count in self.items(): - if count > 0: - result[elem] = count - return result - - def __neg__(self): - '''Subtracts from an empty counter. Strips positive and zero counts, - and flips the sign on negative counts. - - ''' - result = Counter() - for elem, count in self.items(): - if count < 0: - result[elem] = 0 - count - return result - - def _keep_positive(self): - '''Internal method to strip elements with a negative or zero count''' - nonpositive = [elem for elem, count in self.items() if not count > 0] - for elem in nonpositive: - del self[elem] - return self - - def __iadd__(self, other): - '''Inplace add from another counter, keeping only positive counts. - - >>> c = Counter('abbb') - >>> c += Counter('bcc') - >>> c - Counter({'b': 4, 'c': 2, 'a': 1}) - - ''' - for elem, count in other.items(): - self[elem] += count - return self._keep_positive() - - def __isub__(self, other): - '''Inplace subtract counter, but keep only results with positive counts. - - >>> c = Counter('abbbc') - >>> c -= Counter('bccd') - >>> c - Counter({'b': 2, 'a': 1}) - - ''' - for elem, count in other.items(): - self[elem] -= count - return self._keep_positive() - - def __ior__(self, other): - '''Inplace union is the maximum of value from either counter. - - >>> c = Counter('abbb') - >>> c |= Counter('bcc') - >>> c - Counter({'b': 3, 'c': 2, 'a': 1}) - - ''' - for elem, other_count in other.items(): - count = self[elem] - if other_count > count: - self[elem] = other_count - return self._keep_positive() - - def __iand__(self, other): - '''Inplace intersection is the minimum of corresponding counts. - - >>> c = Counter('abbb') - >>> c &= Counter('bcc') - >>> c - Counter({'b': 1}) - - ''' - for elem, count in self.items(): - other_count = other[elem] - if other_count < count: - self[elem] = other_count - return self._keep_positive() - - -######################################################################## -### ChainMap -######################################################################## - -class ChainMap(_collections_abc.MutableMapping): - ''' A ChainMap groups multiple dicts (or other mappings) together - to create a single, updateable view. - - The underlying mappings are stored in a list. That list is public and can - be accessed or updated using the *maps* attribute. There is no other - state. - - Lookups search the underlying mappings successively until a key is found. - In contrast, writes, updates, and deletions only operate on the first - mapping. - - ''' - - def __init__(self, *maps): - '''Initialize a ChainMap by setting *maps* to the given mappings. - If no mappings are provided, a single empty dictionary is used. 
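-
- For example, lookups consult the maps from left to right:
-
- >>> cm = ChainMap({'a': 1}, {'a': 2, 'b': 3})
- >>> cm['a'], cm['b']
- (1, 3)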
- - ''' - self.maps = list(maps) or [{}] # always at least one map - - def __missing__(self, key): - raise KeyError(key) - - def __getitem__(self, key): - for mapping in self.maps: - try: - return mapping[key] # can't use 'key in mapping' with defaultdict - except KeyError: - pass - return self.__missing__(key) # support subclasses that define __missing__ - - def get(self, key, default=None): - return self[key] if key in self else default - - def __len__(self): - return len(set().union(*self.maps)) # reuses stored hash values if possible - - def __iter__(self): - d = {} - for mapping in reversed(self.maps): - d.update(dict.fromkeys(mapping)) # reuses stored hash values if possible - return iter(d) - - def __contains__(self, key): - return any(key in m for m in self.maps) - - def __bool__(self): - return any(self.maps) - - @_recursive_repr() - def __repr__(self): - return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})' - - @classmethod - def fromkeys(cls, iterable, *args): - 'Create a ChainMap with a single dict created from the iterable.' - return cls(dict.fromkeys(iterable, *args)) - - def copy(self): - 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' - return self.__class__(self.maps[0].copy(), *self.maps[1:]) - - __copy__ = copy - - def new_child(self, m=None, **kwargs): # like Django's Context.push() - '''New ChainMap with a new map followed by all previous maps. - If no map is provided, an empty dict is used. - Keyword arguments update the map or new empty dict. - ''' - if m is None: - m = kwargs - elif kwargs: - m.update(kwargs) - return self.__class__(m, *self.maps) - - @property - def parents(self): # like Django's Context.pop() - 'New ChainMap from maps[1:].' - return self.__class__(*self.maps[1:]) - - def __setitem__(self, key, value): - self.maps[0][key] = value - - def __delitem__(self, key): - try: - del self.maps[0][key] - except KeyError: - raise KeyError(f'Key not found in the first mapping: {key!r}') - - def popitem(self): - 'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.' - try: - return self.maps[0].popitem() - except KeyError: - raise KeyError('No keys found in the first mapping.') - - def pop(self, key, *args): - 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' - try: - return self.maps[0].pop(key, *args) - except KeyError: - raise KeyError(f'Key not found in the first mapping: {key!r}') - - def clear(self): - 'Clear maps[0], leaving maps[1:] intact.' 
- self.maps[0].clear() - - def __ior__(self, other): - self.maps[0].update(other) - return self - - def __or__(self, other): - if not isinstance(other, _collections_abc.Mapping): - return NotImplemented - m = self.copy() - m.maps[0].update(other) - return m - - def __ror__(self, other): - if not isinstance(other, _collections_abc.Mapping): - return NotImplemented - m = dict(other) - for child in reversed(self.maps): - m.update(child) - return self.__class__(m) - - -################################################################################ -### UserDict -################################################################################ - -class UserDict(_collections_abc.MutableMapping): - - # Start by filling-out the abstract methods - def __init__(self, dict=None, /, **kwargs): - self.data = {} - if dict is not None: - self.update(dict) - if kwargs: - self.update(kwargs) - - def __len__(self): - return len(self.data) - - def __getitem__(self, key): - if key in self.data: - return self.data[key] - if hasattr(self.__class__, "__missing__"): - return self.__class__.__missing__(self, key) - raise KeyError(key) - - def __setitem__(self, key, item): - self.data[key] = item - - def __delitem__(self, key): - del self.data[key] - - def __iter__(self): - return iter(self.data) - - # Modify __contains__ to work correctly when __missing__ is present - def __contains__(self, key): - return key in self.data - - # Now, add the methods in dicts but not in MutableMapping - def __repr__(self): - return repr(self.data) - - def __or__(self, other): - if isinstance(other, UserDict): - return self.__class__(self.data | other.data) - if isinstance(other, dict): - return self.__class__(self.data | other) - return NotImplemented - - def __ror__(self, other): - if isinstance(other, UserDict): - return self.__class__(other.data | self.data) - if isinstance(other, dict): - return self.__class__(other | self.data) - return NotImplemented - - def __ior__(self, other): - if isinstance(other, UserDict): - self.data |= other.data - else: - self.data |= other - return self - - def __copy__(self): - inst = self.__class__.__new__(self.__class__) - inst.__dict__.update(self.__dict__) - # Create a copy and avoid triggering descriptors - inst.__dict__["data"] = self.__dict__["data"].copy() - return inst - - def copy(self): - if self.__class__ is UserDict: - return UserDict(self.data.copy()) - import copy - data = self.data - try: - self.data = {} - c = copy.copy(self) - finally: - self.data = data - c.update(self) - return c - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - -################################################################################ -### UserList -################################################################################ - -class UserList(_collections_abc.MutableSequence): - """A more or less complete user-defined wrapper around list objects.""" - - def __init__(self, initlist=None): - self.data = [] - if initlist is not None: - # XXX should this accept an arbitrary sequence? 
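- # A plain list is copied by slice assignment below; any other
- # iterable is converted with list().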
- if type(initlist) == type(self.data): - self.data[:] = initlist - elif isinstance(initlist, UserList): - self.data[:] = initlist.data[:] - else: - self.data = list(initlist) - - def __repr__(self): - return repr(self.data) - - def __lt__(self, other): - return self.data < self.__cast(other) - - def __le__(self, other): - return self.data <= self.__cast(other) - - def __eq__(self, other): - return self.data == self.__cast(other) - - def __gt__(self, other): - return self.data > self.__cast(other) - - def __ge__(self, other): - return self.data >= self.__cast(other) - - def __cast(self, other): - return other.data if isinstance(other, UserList) else other - - def __contains__(self, item): - return item in self.data - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - if isinstance(i, slice): - return self.__class__(self.data[i]) - else: - return self.data[i] - - def __setitem__(self, i, item): - self.data[i] = item - - def __delitem__(self, i): - del self.data[i] - - def __add__(self, other): - if isinstance(other, UserList): - return self.__class__(self.data + other.data) - elif isinstance(other, type(self.data)): - return self.__class__(self.data + other) - return self.__class__(self.data + list(other)) - - def __radd__(self, other): - if isinstance(other, UserList): - return self.__class__(other.data + self.data) - elif isinstance(other, type(self.data)): - return self.__class__(other + self.data) - return self.__class__(list(other) + self.data) - - def __iadd__(self, other): - if isinstance(other, UserList): - self.data += other.data - elif isinstance(other, type(self.data)): - self.data += other - else: - self.data += list(other) - return self - - def __mul__(self, n): - return self.__class__(self.data * n) - - __rmul__ = __mul__ - - def __imul__(self, n): - self.data *= n - return self - - def __copy__(self): - inst = self.__class__.__new__(self.__class__) - inst.__dict__.update(self.__dict__) - # Create a copy and avoid triggering descriptors - inst.__dict__["data"] = self.__dict__["data"][:] - return inst - - def append(self, item): - self.data.append(item) - - def insert(self, i, item): - self.data.insert(i, item) - - def pop(self, i=-1): - return self.data.pop(i) - - def remove(self, item): - self.data.remove(item) - - def clear(self): - self.data.clear() - - def copy(self): - return self.__class__(self) - - def count(self, item): - return self.data.count(item) - - def index(self, item, *args): - return self.data.index(item, *args) - - def reverse(self): - self.data.reverse() - - def sort(self, /, *args, **kwds): - self.data.sort(*args, **kwds) - - def extend(self, other): - if isinstance(other, UserList): - self.data.extend(other.data) - else: - self.data.extend(other) - - -################################################################################ -### UserString -################################################################################ - -class UserString(_collections_abc.Sequence): - - def __init__(self, seq): - if isinstance(seq, str): - self.data = seq - elif isinstance(seq, UserString): - self.data = seq.data[:] - else: - self.data = str(seq) - - def __str__(self): - return str(self.data) - - def __repr__(self): - return repr(self.data) - - def __int__(self): - return int(self.data) - - def __float__(self): - return float(self.data) - - def __complex__(self): - return complex(self.data) - - def __hash__(self): - return hash(self.data) - - def __getnewargs__(self): - return (self.data[:],) - - def __eq__(self, string): - if 
isinstance(string, UserString): - return self.data == string.data - return self.data == string - - def __lt__(self, string): - if isinstance(string, UserString): - return self.data < string.data - return self.data < string - - def __le__(self, string): - if isinstance(string, UserString): - return self.data <= string.data - return self.data <= string - - def __gt__(self, string): - if isinstance(string, UserString): - return self.data > string.data - return self.data > string - - def __ge__(self, string): - if isinstance(string, UserString): - return self.data >= string.data - return self.data >= string - - def __contains__(self, char): - if isinstance(char, UserString): - char = char.data - return char in self.data - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.__class__(self.data[index]) - - def __add__(self, other): - if isinstance(other, UserString): - return self.__class__(self.data + other.data) - elif isinstance(other, str): - return self.__class__(self.data + other) - return self.__class__(self.data + str(other)) - - def __radd__(self, other): - if isinstance(other, str): - return self.__class__(other + self.data) - return self.__class__(str(other) + self.data) - - def __mul__(self, n): - return self.__class__(self.data * n) - - __rmul__ = __mul__ - - def __mod__(self, args): - return self.__class__(self.data % args) - - def __rmod__(self, template): - return self.__class__(str(template) % self) - - # the following methods are defined in alphabetical order: - def capitalize(self): - return self.__class__(self.data.capitalize()) - - def casefold(self): - return self.__class__(self.data.casefold()) - - def center(self, width, *args): - return self.__class__(self.data.center(width, *args)) - - def count(self, sub, start=0, end=_sys.maxsize): - if isinstance(sub, UserString): - sub = sub.data - return self.data.count(sub, start, end) - - def removeprefix(self, prefix, /): - if isinstance(prefix, UserString): - prefix = prefix.data - return self.__class__(self.data.removeprefix(prefix)) - - def removesuffix(self, suffix, /): - if isinstance(suffix, UserString): - suffix = suffix.data - return self.__class__(self.data.removesuffix(suffix)) - - def encode(self, encoding='utf-8', errors='strict'): - encoding = 'utf-8' if encoding is None else encoding - errors = 'strict' if errors is None else errors - return self.data.encode(encoding, errors) - - def endswith(self, suffix, start=0, end=_sys.maxsize): - return self.data.endswith(suffix, start, end) - - def expandtabs(self, tabsize=8): - return self.__class__(self.data.expandtabs(tabsize)) - - def find(self, sub, start=0, end=_sys.maxsize): - if isinstance(sub, UserString): - sub = sub.data - return self.data.find(sub, start, end) - - def format(self, /, *args, **kwds): - return self.data.format(*args, **kwds) - - def format_map(self, mapping): - return self.data.format_map(mapping) - - def index(self, sub, start=0, end=_sys.maxsize): - return self.data.index(sub, start, end) - - def isalpha(self): - return self.data.isalpha() - - def isalnum(self): - return self.data.isalnum() - - def isascii(self): - return self.data.isascii() - - def isdecimal(self): - return self.data.isdecimal() - - def isdigit(self): - return self.data.isdigit() - - def isidentifier(self): - return self.data.isidentifier() - - def islower(self): - return self.data.islower() - - def isnumeric(self): - return self.data.isnumeric() - - def isprintable(self): - return self.data.isprintable() - - def isspace(self): - return 
self.data.isspace() - - def istitle(self): - return self.data.istitle() - - def isupper(self): - return self.data.isupper() - - def join(self, seq): - return self.data.join(seq) - - def ljust(self, width, *args): - return self.__class__(self.data.ljust(width, *args)) - - def lower(self): - return self.__class__(self.data.lower()) - - def lstrip(self, chars=None): - return self.__class__(self.data.lstrip(chars)) - - maketrans = str.maketrans - - def partition(self, sep): - return self.data.partition(sep) - - def replace(self, old, new, maxsplit=-1): - if isinstance(old, UserString): - old = old.data - if isinstance(new, UserString): - new = new.data - return self.__class__(self.data.replace(old, new, maxsplit)) - - def rfind(self, sub, start=0, end=_sys.maxsize): - if isinstance(sub, UserString): - sub = sub.data - return self.data.rfind(sub, start, end) - - def rindex(self, sub, start=0, end=_sys.maxsize): - return self.data.rindex(sub, start, end) - - def rjust(self, width, *args): - return self.__class__(self.data.rjust(width, *args)) - - def rpartition(self, sep): - return self.data.rpartition(sep) - - def rstrip(self, chars=None): - return self.__class__(self.data.rstrip(chars)) - - def split(self, sep=None, maxsplit=-1): - return self.data.split(sep, maxsplit) - - def rsplit(self, sep=None, maxsplit=-1): - return self.data.rsplit(sep, maxsplit) - - def splitlines(self, keepends=False): - return self.data.splitlines(keepends) - - def startswith(self, prefix, start=0, end=_sys.maxsize): - return self.data.startswith(prefix, start, end) - - def strip(self, chars=None): - return self.__class__(self.data.strip(chars)) - - def swapcase(self): - return self.__class__(self.data.swapcase()) - - def title(self): - return self.__class__(self.data.title()) - - def translate(self, *args): - return self.__class__(self.data.translate(*args)) - - def upper(self): - return self.__class__(self.data.upper()) - - def zfill(self, width): - return self.__class__(self.data.zfill(width)) diff --git a/controllers/guidance_ctrl/Lib/collections/_defaultdict.py b/controllers/guidance_ctrl/Lib/collections/_defaultdict.py deleted file mode 100644 index b9c6c496..00000000 --- a/controllers/guidance_ctrl/Lib/collections/_defaultdict.py +++ /dev/null @@ -1,58 +0,0 @@ -from reprlib import recursive_repr as _recursive_repr - -class defaultdict(dict): - def __init__(self, *args, **kwargs): - if len(args) >= 1: - default_factory = args[0] - if default_factory is not None and not callable(default_factory): - raise TypeError("first argument must be callable or None") - args = args[1:] - else: - default_factory = None - super().__init__(*args, **kwargs) - self.default_factory = default_factory - - def __missing__(self, key): - if self.default_factory is not None: - val = self.default_factory() - else: - raise KeyError(key) - self[key] = val - return val - - @_recursive_repr() - def __repr_factory(factory): - return repr(factory) - - def __repr__(self): - return f"{type(self).__name__}({defaultdict.__repr_factory(self.default_factory)}, {dict.__repr__(self)})" - - def copy(self): - return type(self)(self.default_factory, self) - - __copy__ = copy - - def __reduce__(self): - if self.default_factory is not None: - args = self.default_factory, - else: - args = () - return type(self), args, None, None, iter(self.items()) - - def __or__(self, other): - if not isinstance(other, dict): - return NotImplemented - - new = defaultdict(self.default_factory, self) - new.update(other) - return new - - def __ror__(self, other): - if not 
isinstance(other, dict): - return NotImplemented - - new = defaultdict(self.default_factory, other) - new.update(self) - return new - -defaultdict.__module__ = 'collections' diff --git a/controllers/guidance_ctrl/Lib/collections/abc.py b/controllers/guidance_ctrl/Lib/collections/abc.py deleted file mode 100644 index 86ca8b8a..00000000 --- a/controllers/guidance_ctrl/Lib/collections/abc.py +++ /dev/null @@ -1,3 +0,0 @@ -from _collections_abc import * -from _collections_abc import __all__ -from _collections_abc import _CallableGenericAlias diff --git a/controllers/guidance_ctrl/Lib/contextlib.py b/controllers/guidance_ctrl/Lib/contextlib.py deleted file mode 100644 index 58e9a498..00000000 --- a/controllers/guidance_ctrl/Lib/contextlib.py +++ /dev/null @@ -1,779 +0,0 @@ -"""Utilities for with-statement contexts. See PEP 343.""" -import abc -import os -import sys -import _collections_abc -from collections import deque -from functools import wraps -from types import MethodType, GenericAlias - -__all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext", - "AbstractContextManager", "AbstractAsyncContextManager", - "AsyncExitStack", "ContextDecorator", "ExitStack", - "redirect_stdout", "redirect_stderr", "suppress", "aclosing", - "chdir"] - - -class AbstractContextManager(abc.ABC): - - """An abstract base class for context managers.""" - - __class_getitem__ = classmethod(GenericAlias) - - def __enter__(self): - """Return `self` upon entering the runtime context.""" - return self - - @abc.abstractmethod - def __exit__(self, exc_type, exc_value, traceback): - """Raise any exception triggered within the runtime context.""" - return None - - @classmethod - def __subclasshook__(cls, C): - if cls is AbstractContextManager: - return _collections_abc._check_methods(C, "__enter__", "__exit__") - return NotImplemented - - -class AbstractAsyncContextManager(abc.ABC): - - """An abstract base class for asynchronous context managers.""" - - __class_getitem__ = classmethod(GenericAlias) - - async def __aenter__(self): - """Return `self` upon entering the runtime context.""" - return self - - @abc.abstractmethod - async def __aexit__(self, exc_type, exc_value, traceback): - """Raise any exception triggered within the runtime context.""" - return None - - @classmethod - def __subclasshook__(cls, C): - if cls is AbstractAsyncContextManager: - return _collections_abc._check_methods(C, "__aenter__", - "__aexit__") - return NotImplemented - - -class ContextDecorator(object): - "A base class or mixin that enables context managers to work as decorators." - - def _recreate_cm(self): - """Return a recreated instance of self. - - Allows an otherwise one-shot context manager like - _GeneratorContextManager to support use as - a decorator via implicit recreation. - - This is a private interface just for _GeneratorContextManager. - See issue #11647 for details. - """ - return self - - def __call__(self, func): - @wraps(func) - def inner(*args, **kwds): - with self._recreate_cm(): - return func(*args, **kwds) - return inner - - -class AsyncContextDecorator(object): - "A base class or mixin that enables async context managers to work as decorators." - - def _recreate_cm(self): - """Return a recreated instance of self. 
- """ - return self - - def __call__(self, func): - @wraps(func) - async def inner(*args, **kwds): - async with self._recreate_cm(): - return await func(*args, **kwds) - return inner - - -class _GeneratorContextManagerBase: - """Shared functionality for @contextmanager and @asynccontextmanager.""" - - def __init__(self, func, args, kwds): - self.gen = func(*args, **kwds) - self.func, self.args, self.kwds = func, args, kwds - # Issue 19330: ensure context manager instances have good docstrings - doc = getattr(func, "__doc__", None) - if doc is None: - doc = type(self).__doc__ - self.__doc__ = doc - # Unfortunately, this still doesn't provide good help output when - # inspecting the created context manager instances, since pydoc - # currently bypasses the instance docstring and shows the docstring - # for the class instead. - # See http://bugs.python.org/issue19404 for more details. - - def _recreate_cm(self): - # _GCMB instances are one-shot context managers, so the - # CM must be recreated each time a decorated function is - # called - return self.__class__(self.func, self.args, self.kwds) - - -class _GeneratorContextManager( - _GeneratorContextManagerBase, - AbstractContextManager, - ContextDecorator, -): - """Helper for @contextmanager decorator.""" - - def __enter__(self): - # do not keep args and kwds alive unnecessarily - # they are only needed for recreation, which is not possible anymore - del self.args, self.kwds, self.func - try: - return next(self.gen) - except StopIteration: - raise RuntimeError("generator didn't yield") from None - - def __exit__(self, typ, value, traceback): - if typ is None: - try: - next(self.gen) - except StopIteration: - return False - else: - raise RuntimeError("generator didn't stop") - else: - if value is None: - # Need to force instantiation so we can reliably - # tell if we get the same exception back - value = typ() - try: - self.gen.throw(typ, value, traceback) - except StopIteration as exc: - # Suppress StopIteration *unless* it's the same exception that - # was passed to throw(). This prevents a StopIteration - # raised inside the "with" statement from being suppressed. - return exc is not value - except RuntimeError as exc: - # Don't re-raise the passed in exception. (issue27122) - if exc is value: - exc.__traceback__ = traceback - return False - # Avoid suppressing if a StopIteration exception - # was passed to throw() and later wrapped into a RuntimeError - # (see PEP 479 for sync generators; async generators also - # have this behavior). But do this only if the exception wrapped - # by the RuntimeError is actually Stop(Async)Iteration (see - # issue29692). - if ( - isinstance(value, StopIteration) - and exc.__cause__ is value - ): - value.__traceback__ = traceback - return False - raise - except BaseException as exc: - # only re-raise if it's *not* the exception that was - # passed to throw(), because __exit__() must not raise - # an exception unless __exit__() itself failed. But throw() - # has to raise the exception to signal propagation, so this - # fixes the impedance mismatch between the throw() protocol - # and the __exit__() protocol. 
- if exc is not value: - raise - exc.__traceback__ = traceback - return False - raise RuntimeError("generator didn't stop after throw()") - -class _AsyncGeneratorContextManager( - _GeneratorContextManagerBase, - AbstractAsyncContextManager, - AsyncContextDecorator, -): - """Helper for @asynccontextmanager decorator.""" - - async def __aenter__(self): - # do not keep args and kwds alive unnecessarily - # they are only needed for recreation, which is not possible anymore - del self.args, self.kwds, self.func - try: - return await anext(self.gen) - except StopAsyncIteration: - raise RuntimeError("generator didn't yield") from None - - async def __aexit__(self, typ, value, traceback): - if typ is None: - try: - await anext(self.gen) - except StopAsyncIteration: - return False - else: - raise RuntimeError("generator didn't stop") - else: - if value is None: - # Need to force instantiation so we can reliably - # tell if we get the same exception back - value = typ() - try: - await self.gen.athrow(typ, value, traceback) - except StopAsyncIteration as exc: - # Suppress StopIteration *unless* it's the same exception that - # was passed to throw(). This prevents a StopIteration - # raised inside the "with" statement from being suppressed. - return exc is not value - except RuntimeError as exc: - # Don't re-raise the passed in exception. (issue27122) - if exc is value: - exc.__traceback__ = traceback - return False - # Avoid suppressing if a Stop(Async)Iteration exception - # was passed to athrow() and later wrapped into a RuntimeError - # (see PEP 479 for sync generators; async generators also - # have this behavior). But do this only if the exception wrapped - # by the RuntimeError is actually Stop(Async)Iteration (see - # issue29692). - if ( - isinstance(value, (StopIteration, StopAsyncIteration)) - and exc.__cause__ is value - ): - value.__traceback__ = traceback - return False - raise - except BaseException as exc: - # only re-raise if it's *not* the exception that was - # passed to throw(), because __exit__() must not raise - # an exception unless __exit__() itself failed. But throw() - # has to raise the exception to signal propagation, so this - # fixes the impedance mismatch between the throw() protocol - # and the __exit__() protocol. - if exc is not value: - raise - exc.__traceback__ = traceback - return False - raise RuntimeError("generator didn't stop after athrow()") - - -def contextmanager(func): - """@contextmanager decorator. - - Typical usage: - - @contextmanager - def some_generator(): - - try: - yield - finally: - - - This makes this: - - with some_generator() as : - - - equivalent to this: - - - try: - = - - finally: - - """ - @wraps(func) - def helper(*args, **kwds): - return _GeneratorContextManager(func, args, kwds) - return helper - - -def asynccontextmanager(func): - """@asynccontextmanager decorator. - - Typical usage: - - @asynccontextmanager - async def some_async_generator(): - - try: - yield - finally: - - - This makes this: - - async with some_async_generator() as : - - - equivalent to this: - - - try: - = - - finally: - - """ - @wraps(func) - def helper(*args, **kwds): - return _AsyncGeneratorContextManager(func, args, kwds) - return helper - - -class closing(AbstractContextManager): - """Context to automatically close something at the end of a block. 
- - Code like this: - - with closing(.open()) as f: - - - is equivalent to this: - - f = .open() - try: - - finally: - f.close() - - """ - def __init__(self, thing): - self.thing = thing - def __enter__(self): - return self.thing - def __exit__(self, *exc_info): - self.thing.close() - - -class aclosing(AbstractAsyncContextManager): - """Async context manager for safely finalizing an asynchronously cleaned-up - resource such as an async generator, calling its ``aclose()`` method. - - Code like this: - - async with aclosing(.fetch()) as agen: - - - is equivalent to this: - - agen = .fetch() - try: - - finally: - await agen.aclose() - - """ - def __init__(self, thing): - self.thing = thing - async def __aenter__(self): - return self.thing - async def __aexit__(self, *exc_info): - await self.thing.aclose() - - -class _RedirectStream(AbstractContextManager): - - _stream = None - - def __init__(self, new_target): - self._new_target = new_target - # We use a list of old targets to make this CM re-entrant - self._old_targets = [] - - def __enter__(self): - self._old_targets.append(getattr(sys, self._stream)) - setattr(sys, self._stream, self._new_target) - return self._new_target - - def __exit__(self, exctype, excinst, exctb): - setattr(sys, self._stream, self._old_targets.pop()) - - -class redirect_stdout(_RedirectStream): - """Context manager for temporarily redirecting stdout to another file. - - # How to send help() to stderr - with redirect_stdout(sys.stderr): - help(dir) - - # How to write help() to a file - with open('help.txt', 'w') as f: - with redirect_stdout(f): - help(pow) - """ - - _stream = "stdout" - - -class redirect_stderr(_RedirectStream): - """Context manager for temporarily redirecting stderr to another file.""" - - _stream = "stderr" - - -class suppress(AbstractContextManager): - """Context manager to suppress specified exceptions - - After the exception is suppressed, execution proceeds with the next - statement following the with statement. - - with suppress(FileNotFoundError): - os.remove(somefile) - # Execution still resumes here if the file was already removed - """ - - def __init__(self, *exceptions): - self._exceptions = exceptions - - def __enter__(self): - pass - - def __exit__(self, exctype, excinst, exctb): - # Unlike isinstance and issubclass, CPython exception handling - # currently only looks at the concrete type hierarchy (ignoring - # the instance and subclass checking hooks). While Guido considers - # that a bug rather than a feature, it's a fairly hard one to fix - # due to various internal implementation details. suppress provides - # the simpler issubclass based semantics, rather than trying to - # exactly reproduce the limitations of the CPython interpreter. 
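- # Note that issubclass() accepts a tuple of exception classes, so
- # the self._exceptions tuple below can be passed to it directly.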
- # - # See http://bugs.python.org/issue12029 for more details - return exctype is not None and issubclass(exctype, self._exceptions) - - -class _BaseExitStack: - """A base class for ExitStack and AsyncExitStack.""" - - @staticmethod - def _create_exit_wrapper(cm, cm_exit): - return MethodType(cm_exit, cm) - - @staticmethod - def _create_cb_wrapper(callback, /, *args, **kwds): - def _exit_wrapper(exc_type, exc, tb): - callback(*args, **kwds) - return _exit_wrapper - - def __init__(self): - self._exit_callbacks = deque() - - def pop_all(self): - """Preserve the context stack by transferring it to a new instance.""" - new_stack = type(self)() - new_stack._exit_callbacks = self._exit_callbacks - self._exit_callbacks = deque() - return new_stack - - def push(self, exit): - """Registers a callback with the standard __exit__ method signature. - - Can suppress exceptions the same way __exit__ method can. - Also accepts any object with an __exit__ method (registering a call - to the method instead of the object itself). - """ - # We use an unbound method rather than a bound method to follow - # the standard lookup behaviour for special methods. - _cb_type = type(exit) - - try: - exit_method = _cb_type.__exit__ - except AttributeError: - # Not a context manager, so assume it's a callable. - self._push_exit_callback(exit) - else: - self._push_cm_exit(exit, exit_method) - return exit # Allow use as a decorator. - - def enter_context(self, cm): - """Enters the supplied context manager. - - If successful, also pushes its __exit__ method as a callback and - returns the result of the __enter__ method. - """ - # We look up the special methods on the type to match the with - # statement. - cls = type(cm) - try: - _enter = cls.__enter__ - _exit = cls.__exit__ - except AttributeError: - raise TypeError(f"'{cls.__module__}.{cls.__qualname__}' object does " - f"not support the context manager protocol") from None - result = _enter(cm) - self._push_cm_exit(cm, _exit) - return result - - def callback(self, callback, /, *args, **kwds): - """Registers an arbitrary callback and arguments. - - Cannot suppress exceptions. - """ - _exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds) - - # We changed the signature, so using @wraps is not appropriate, but - # setting __wrapped__ may still help with introspection. - _exit_wrapper.__wrapped__ = callback - self._push_exit_callback(_exit_wrapper) - return callback # Allow use as a decorator - - def _push_cm_exit(self, cm, cm_exit): - """Helper to correctly register callbacks to __exit__ methods.""" - _exit_wrapper = self._create_exit_wrapper(cm, cm_exit) - self._push_exit_callback(_exit_wrapper, True) - - def _push_exit_callback(self, callback, is_sync=True): - self._exit_callbacks.append((is_sync, callback)) - - -# Inspired by discussions on http://bugs.python.org/issue13585 -class ExitStack(_BaseExitStack, AbstractContextManager): - """Context manager for dynamic management of a stack of exit callbacks. - - For example: - with ExitStack() as stack: - files = [stack.enter_context(open(fname)) for fname in filenames] - # All opened files will automatically be closed at the end of - # the with statement, even if attempts to open files later - # in the list raise an exception. 
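-
- Registered callbacks are invoked in LIFO order when the stack
- unwinds, mirroring the behaviour of nested with statements.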
- """ - - def __enter__(self): - return self - - def __exit__(self, *exc_details): - received_exc = exc_details[0] is not None - - # We manipulate the exception state so it behaves as though - # we were actually nesting multiple with statements - frame_exc = sys.exc_info()[1] - def _fix_exception_context(new_exc, old_exc): - # Context may not be correct, so find the end of the chain - while 1: - exc_context = new_exc.__context__ - if exc_context is None or exc_context is old_exc: - # Context is already set correctly (see issue 20317) - return - if exc_context is frame_exc: - break - new_exc = exc_context - # Change the end of the chain to point to the exception - # we expect it to reference - new_exc.__context__ = old_exc - - # Callbacks are invoked in LIFO order to match the behaviour of - # nested context managers - suppressed_exc = False - pending_raise = False - while self._exit_callbacks: - is_sync, cb = self._exit_callbacks.pop() - assert is_sync - try: - if cb(*exc_details): - suppressed_exc = True - pending_raise = False - exc_details = (None, None, None) - except: - new_exc_details = sys.exc_info() - # simulate the stack of exceptions by setting the context - _fix_exception_context(new_exc_details[1], exc_details[1]) - pending_raise = True - exc_details = new_exc_details - if pending_raise: - try: - # bare "raise exc_details[1]" replaces our carefully - # set-up context - fixed_ctx = exc_details[1].__context__ - raise exc_details[1] - except BaseException: - exc_details[1].__context__ = fixed_ctx - raise - return received_exc and suppressed_exc - - def close(self): - """Immediately unwind the context stack.""" - self.__exit__(None, None, None) - - -# Inspired by discussions on https://bugs.python.org/issue29302 -class AsyncExitStack(_BaseExitStack, AbstractAsyncContextManager): - """Async context manager for dynamic management of a stack of exit - callbacks. - - For example: - async with AsyncExitStack() as stack: - connections = [await stack.enter_async_context(get_connection()) - for i in range(5)] - # All opened connections will automatically be released at the - # end of the async with statement, even if attempts to open a - # connection later in the list raise an exception. - """ - - @staticmethod - def _create_async_exit_wrapper(cm, cm_exit): - return MethodType(cm_exit, cm) - - @staticmethod - def _create_async_cb_wrapper(callback, /, *args, **kwds): - async def _exit_wrapper(exc_type, exc, tb): - await callback(*args, **kwds) - return _exit_wrapper - - async def enter_async_context(self, cm): - """Enters the supplied async context manager. - - If successful, also pushes its __aexit__ method as a callback and - returns the result of the __aenter__ method. - """ - cls = type(cm) - try: - _enter = cls.__aenter__ - _exit = cls.__aexit__ - except AttributeError: - raise TypeError(f"'{cls.__module__}.{cls.__qualname__}' object does " - f"not support the asynchronous context manager protocol" - ) from None - result = await _enter(cm) - self._push_async_cm_exit(cm, _exit) - return result - - def push_async_exit(self, exit): - """Registers a coroutine function with the standard __aexit__ method - signature. - - Can suppress exceptions the same way __aexit__ method can. - Also accepts any object with an __aexit__ method (registering a call - to the method instead of the object itself). 
- """ - _cb_type = type(exit) - try: - exit_method = _cb_type.__aexit__ - except AttributeError: - # Not an async context manager, so assume it's a coroutine function - self._push_exit_callback(exit, False) - else: - self._push_async_cm_exit(exit, exit_method) - return exit # Allow use as a decorator - - def push_async_callback(self, callback, /, *args, **kwds): - """Registers an arbitrary coroutine function and arguments. - - Cannot suppress exceptions. - """ - _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) - - # We changed the signature, so using @wraps is not appropriate, but - # setting __wrapped__ may still help with introspection. - _exit_wrapper.__wrapped__ = callback - self._push_exit_callback(_exit_wrapper, False) - return callback # Allow use as a decorator - - async def aclose(self): - """Immediately unwind the context stack.""" - await self.__aexit__(None, None, None) - - def _push_async_cm_exit(self, cm, cm_exit): - """Helper to correctly register coroutine function to __aexit__ - method.""" - _exit_wrapper = self._create_async_exit_wrapper(cm, cm_exit) - self._push_exit_callback(_exit_wrapper, False) - - async def __aenter__(self): - return self - - async def __aexit__(self, *exc_details): - received_exc = exc_details[0] is not None - - # We manipulate the exception state so it behaves as though - # we were actually nesting multiple with statements - frame_exc = sys.exc_info()[1] - def _fix_exception_context(new_exc, old_exc): - # Context may not be correct, so find the end of the chain - while 1: - exc_context = new_exc.__context__ - if exc_context is None or exc_context is old_exc: - # Context is already set correctly (see issue 20317) - return - if exc_context is frame_exc: - break - new_exc = exc_context - # Change the end of the chain to point to the exception - # we expect it to reference - new_exc.__context__ = old_exc - - # Callbacks are invoked in LIFO order to match the behaviour of - # nested context managers - suppressed_exc = False - pending_raise = False - while self._exit_callbacks: - is_sync, cb = self._exit_callbacks.pop() - try: - if is_sync: - cb_suppress = cb(*exc_details) - else: - cb_suppress = await cb(*exc_details) - - if cb_suppress: - suppressed_exc = True - pending_raise = False - exc_details = (None, None, None) - except: - new_exc_details = sys.exc_info() - # simulate the stack of exceptions by setting the context - _fix_exception_context(new_exc_details[1], exc_details[1]) - pending_raise = True - exc_details = new_exc_details - if pending_raise: - try: - # bare "raise exc_details[1]" replaces our carefully - # set-up context - fixed_ctx = exc_details[1].__context__ - raise exc_details[1] - except BaseException: - exc_details[1].__context__ = fixed_ctx - raise - return received_exc and suppressed_exc - - -class nullcontext(AbstractContextManager, AbstractAsyncContextManager): - """Context manager that does no additional processing. 
- - Used as a stand-in for a normal context manager, when a particular - block of code is only sometimes used with a normal context manager: - - cm = optional_cm if condition else nullcontext() - with cm: - # Perform operation, using optional_cm if condition is True - """ - - def __init__(self, enter_result=None): - self.enter_result = enter_result - - def __enter__(self): - return self.enter_result - - def __exit__(self, *excinfo): - pass - - async def __aenter__(self): - return self.enter_result - - async def __aexit__(self, *excinfo): - pass - - -class chdir(AbstractContextManager): - """Non thread-safe context manager to change the current working directory.""" - - def __init__(self, path): - self.path = path - self._old_cwd = [] - - def __enter__(self): - self._old_cwd.append(os.getcwd()) - os.chdir(self.path) - - def __exit__(self, *excinfo): - os.chdir(self._old_cwd.pop()) diff --git a/controllers/guidance_ctrl/Lib/copy.sh b/controllers/guidance_ctrl/Lib/copy.sh deleted file mode 100755 index 0237b638..00000000 --- a/controllers/guidance_ctrl/Lib/copy.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -for f in `find -name \*.py | xargs cat | grep -E '^(import |from \S+ import)' | awk '{print $2}' | sort | uniq` ; do -test -f $f.py && continue -test -f $f/__init__.py && continue -if test -f ../../RustPython/pylib/Lib/$f.py ; then - echo cp ../../RustPython/pylib/Lib/$f.py $f.py - continue -fi -echo "? $f" -done diff --git a/controllers/guidance_ctrl/Lib/copyreg.py b/controllers/guidance_ctrl/Lib/copyreg.py deleted file mode 100644 index dfc463c4..00000000 --- a/controllers/guidance_ctrl/Lib/copyreg.py +++ /dev/null @@ -1,209 +0,0 @@ -"""Helper to provide extensibility for pickle. - -This is only useful to add pickle support for extension types defined in -C, not for instances of user-defined classes. -""" - -__all__ = ["pickle", "constructor", - "add_extension", "remove_extension", "clear_extension_cache"] - -dispatch_table = {} - -def pickle(ob_type, pickle_function, constructor_ob=None): - if not callable(pickle_function): - raise TypeError("reduction functions must be callable") - dispatch_table[ob_type] = pickle_function - - # The constructor_ob function is a vestige of safe for unpickling. - # There is no reason for the caller to pass it anymore. - if constructor_ob is not None: - constructor(constructor_ob) - -def constructor(object): - if not callable(object): - raise TypeError("constructors must be callable") - -# Example: provide pickling support for complex numbers. 
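[Context note for the deleted copyreg module: its pickle() function registers a reduction function in the module-level dispatch_table, which the pickle machinery consults before falling back to __reduce_ex__. A minimal usage sketch follows; the Point class and pickle_point reducer are hypothetical, not part of the deleted file.]

    import copyreg
    import pickle

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def pickle_point(p):
        # A reduction function returns (callable, args); on load,
        # pickle calls Point(p.x, p.y) to reconstruct the object.
        return Point, (p.x, p.y)

    copyreg.pickle(Point, pickle_point)
    data = pickle.dumps(Point(1, 2))   # routed through dispatch_table
    assert pickle.loads(data).x == 1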
- -try: - complex -except NameError: - pass -else: - - def pickle_complex(c): - return complex, (c.real, c.imag) - - pickle(complex, pickle_complex, complex) - -# Support for pickling new-style objects - -def _reconstructor(cls, base, state): - if base is object: - obj = object.__new__(cls) - else: - obj = base.__new__(cls, state) - if base.__init__ != object.__init__: - base.__init__(obj, state) - return obj - -_HEAPTYPE = 1<<9 - -# Python code for object.__reduce_ex__ for protocols 0 and 1 - -def _reduce_ex(self, proto): - assert proto < 2 - cls = self.__class__ - for base in cls.__mro__: - if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: - break - else: - base = object # not really reachable - if base is object: - state = None - else: - if base is cls: - raise TypeError(f"cannot pickle {cls.__name__!r} object") - state = base(self) - args = (cls, base, state) - try: - getstate = self.__getstate__ - except AttributeError: - if getattr(self, "__slots__", None): - raise TypeError(f"cannot pickle {cls.__name__!r} object: " - f"a class that defines __slots__ without " - f"defining __getstate__ cannot be pickled " - f"with protocol {proto}") from None - try: - dict = self.__dict__ - except AttributeError: - dict = None - else: - dict = getstate() - if dict: - return _reconstructor, args, dict - else: - return _reconstructor, args - -# Helper for __reduce_ex__ protocol 2 - -def __newobj__(cls, *args): - return cls.__new__(cls, *args) - -def __newobj_ex__(cls, args, kwargs): - """Used by pickle protocol 4, instead of __newobj__ to allow classes with - keyword-only arguments to be pickled correctly. - """ - return cls.__new__(cls, *args, **kwargs) - -def _slotnames(cls): - """Return a list of slot names for a given class. - - This needs to find slots defined by the class and its bases, so we - can't simply return the __slots__ attribute. We must walk down - the Method Resolution Order and concatenate the __slots__ of each - class found there. (This assumes classes don't modify their - __slots__ attribute to misrepresent their slots after the class is - defined.) - """ - - # Get the value from a cache in the class if possible - names = cls.__dict__.get("__slotnames__") - if names is not None: - return names - - # Not cached -- calculate the value - names = [] - if not hasattr(cls, "__slots__"): - # This class has no slots - pass - else: - # Slots found -- gather slot names from all base classes - for c in cls.__mro__: - if "__slots__" in c.__dict__: - slots = c.__dict__['__slots__'] - # if class has a single slot, it can be given as a string - if isinstance(slots, str): - slots = (slots,) - for name in slots: - # special descriptors - if name in ("__dict__", "__weakref__"): - continue - # mangled names - elif name.startswith('__') and not name.endswith('__'): - stripped = c.__name__.lstrip('_') - if stripped: - names.append('_%s%s' % (stripped, name)) - else: - names.append(name) - else: - names.append(name) - - # Cache the outcome in the class if at all possible - try: - cls.__slotnames__ = names - except: - pass # But don't die if we can't - - return names - -# A registry of extension codes. This is an ad-hoc compression -# mechanism. Whenever a global reference to <module>, <name> is about -# to be pickled, the (<module>, <name>) tuple is looked up here to see -# if it is a registered extension code for it. Extension codes are -# universal, so that the meaning of a pickle does not depend on -# context. (There are also some codes reserved for local use that -# don't have this restriction.)
Codes are positive ints; 0 is -# reserved. - -_extension_registry = {} # key -> code -_inverted_registry = {} # code -> key -_extension_cache = {} # code -> object -# Don't ever rebind those names: pickling grabs a reference to them when -# it's initialized, and won't see a rebinding. - -def add_extension(module, name, code): - """Register an extension code.""" - code = int(code) - if not 1 <= code <= 0x7fffffff: - raise ValueError("code out of range") - key = (module, name) - if (_extension_registry.get(key) == code and - _inverted_registry.get(code) == key): - return # Redundant registrations are benign - if key in _extension_registry: - raise ValueError("key %s is already registered with code %s" % - (key, _extension_registry[key])) - if code in _inverted_registry: - raise ValueError("code %s is already in use for key %s" % - (code, _inverted_registry[code])) - _extension_registry[key] = code - _inverted_registry[code] = key - -def remove_extension(module, name, code): - """Unregister an extension code. For testing only.""" - key = (module, name) - if (_extension_registry.get(key) != code or - _inverted_registry.get(code) != key): - raise ValueError("key %s is not registered with code %s" % - (key, code)) - del _extension_registry[key] - del _inverted_registry[code] - if code in _extension_cache: - del _extension_cache[code] - -def clear_extension_cache(): - _extension_cache.clear() - -# Standard extension code assignments - -# Reserved ranges - -# First Last Count Purpose -# 1 127 127 Reserved for Python standard library -# 128 191 64 Reserved for Zope -# 192 239 48 Reserved for 3rd parties -# 240 255 16 Reserved for private use (will never be assigned) -# 256 Inf Inf Reserved for future assignment - -# Extension codes are assigned by the Python Software Foundation. diff --git a/controllers/guidance_ctrl/Lib/enum.py b/controllers/guidance_ctrl/Lib/enum.py deleted file mode 100644 index f8803826..00000000 --- a/controllers/guidance_ctrl/Lib/enum.py +++ /dev/null @@ -1,2081 +0,0 @@ -import sys -import builtins as bltns -from types import MappingProxyType, DynamicClassAttribute -from operator import or_ as _or_ -from functools import reduce - - -__all__ = [ - 'EnumType', 'EnumMeta', - 'Enum', 'IntEnum', 'StrEnum', 'Flag', 'IntFlag', 'ReprEnum', - 'auto', 'unique', 'property', 'verify', 'member', 'nonmember', - 'FlagBoundary', 'STRICT', 'CONFORM', 'EJECT', 'KEEP', - 'global_flag_repr', 'global_enum_repr', 'global_str', 'global_enum', - 'EnumCheck', 'CONTINUOUS', 'NAMED_FLAGS', 'UNIQUE', - 'pickle_by_global_name', 'pickle_by_enum_name', - ] - - -# Dummy value for Enum and Flag as there are explicit checks for them -# before they have been created. -# This is also why there are checks in EnumType like `if Enum is not None` -Enum = Flag = EJECT = _stdlib_enums = ReprEnum = None - -class nonmember(object): - """ - Protects item from becoming an Enum member during class creation. - """ - def __init__(self, value): - self.value = value - -class member(object): - """ - Forces item to become an Enum member during class creation. - """ - def __init__(self, value): - self.value = value - -def _is_descriptor(obj): - """ - Returns True if obj is a descriptor, False otherwise. - """ - return ( - hasattr(obj, '__get__') or - hasattr(obj, '__set__') or - hasattr(obj, '__delete__') - ) - -def _is_dunder(name): - """ - Returns True if a __dunder__ name, False otherwise. 
- """ - return ( - len(name) > 4 and - name[:2] == name[-2:] == '__' and - name[2] != '_' and - name[-3] != '_' - ) - -def _is_sunder(name): - """ - Returns True if a _sunder_ name, False otherwise. - """ - return ( - len(name) > 2 and - name[0] == name[-1] == '_' and - name[1:2] != '_' and - name[-2:-1] != '_' - ) - -def _is_internal_class(cls_name, obj): - # do not use `re` as `re` imports `enum` - if not isinstance(obj, type): - return False - qualname = getattr(obj, '__qualname__', '') - s_pattern = cls_name + '.' + getattr(obj, '__name__', '') - e_pattern = '.' + s_pattern - return qualname == s_pattern or qualname.endswith(e_pattern) - -def _is_private(cls_name, name): - # do not use `re` as `re` imports `enum` - pattern = '_%s__' % (cls_name, ) - pat_len = len(pattern) - if ( - len(name) > pat_len - and name.startswith(pattern) - and name[pat_len:pat_len+1] != ['_'] - and (name[-1] != '_' or name[-2] != '_') - ): - return True - else: - return False - -def _is_single_bit(num): - """ - True if only one bit set in num (should be an int) - """ - if num == 0: - return False - num &= num - 1 - return num == 0 - -def _make_class_unpicklable(obj): - """ - Make the given obj un-picklable. - - obj should be either a dictionary, or an Enum - """ - def _break_on_call_reduce(self, proto): - raise TypeError('%r cannot be pickled' % self) - if isinstance(obj, dict): - obj['__reduce_ex__'] = _break_on_call_reduce - obj['__module__'] = '' - else: - setattr(obj, '__reduce_ex__', _break_on_call_reduce) - setattr(obj, '__module__', '') - -def _iter_bits_lsb(num): - # num must be a positive integer - original = num - if isinstance(num, Enum): - num = num.value - if num < 0: - raise ValueError('%r is not a positive integer' % original) - while num: - b = num & (~num + 1) - yield b - num ^= b - -def show_flag_values(value): - return list(_iter_bits_lsb(value)) - -def bin(num, max_bits=None): - """ - Like built-in bin(), except negative values are represented in - twos-compliment, and the leading bit always indicates sign - (0=positive, 1=negative). - - >>> bin(10) - '0b0 1010' - >>> bin(~10) # ~10 is -11 - '0b1 0101' - """ - - ceiling = 2 ** (num).bit_length() - if num >= 0: - s = bltns.bin(num + ceiling).replace('1', '0', 1) - else: - s = bltns.bin(~num ^ (ceiling - 1) + ceiling) - sign = s[:3] - digits = s[3:] - if max_bits is not None: - if len(digits) < max_bits: - digits = (sign[-1] * max_bits + digits)[-max_bits:] - return "%s %s" % (sign, digits) - -def _dedent(text): - """ - Like textwrap.dedent. Rewritten because we cannot import textwrap. - """ - lines = text.split('\n') - blanks = 0 - for i, ch in enumerate(lines[0]): - if ch != ' ': - break - for j, l in enumerate(lines): - lines[j] = l[i:] - return '\n'.join(lines) - -class _auto_null: - def __repr__(self): - return '_auto_null' -_auto_null = _auto_null() - -class auto: - """ - Instances are replaced with an appropriate value in Enum class suites. - """ - def __init__(self, value=_auto_null): - self.value = value - - def __repr__(self): - return "auto(%r)" % self.value - -class property(DynamicClassAttribute): - """ - This is a descriptor, used to define attributes that act differently - when accessed through an enum member and through an enum class. - Instance access is the same as property(), but access to an attribute - through the enum class will instead look in the class' _member_map_ for - a corresponding enum member. 
- """ - - member = None - _attr_type = None - _cls_type = None - - def __get__(self, instance, ownerclass=None): - if instance is None: - if self.member is not None: - return self.member - else: - raise AttributeError( - '%r has no attribute %r' % (ownerclass, self.name) - ) - if self.fget is not None: - # use previous enum.property - return self.fget(instance) - elif self._attr_type == 'attr': - # look up previous attribute - return getattr(self._cls_type, self.name) - elif self._attr_type == 'desc': - # use previous descriptor - return getattr(instance._value_, self.name) - # look for a member by this name. - try: - return ownerclass._member_map_[self.name] - except KeyError: - raise AttributeError( - '%r has no attribute %r' % (ownerclass, self.name) - ) from None - - def __set__(self, instance, value): - if self.fset is not None: - return self.fset(instance, value) - raise AttributeError( - " cannot set attribute %r" % (self.clsname, self.name) - ) - - def __delete__(self, instance): - if self.fdel is not None: - return self.fdel(instance) - raise AttributeError( - " cannot delete attribute %r" % (self.clsname, self.name) - ) - - def __set_name__(self, ownerclass, name): - self.name = name - self.clsname = ownerclass.__name__ - - -class _proto_member: - """ - intermediate step for enum members between class execution and final creation - """ - - def __init__(self, value): - self.value = value - - def __set_name__(self, enum_class, member_name): - """ - convert each quasi-member into an instance of the new enum class - """ - # first step: remove ourself from enum_class - delattr(enum_class, member_name) - # second step: create member based on enum_class - value = self.value - if not isinstance(value, tuple): - args = (value, ) - else: - args = value - if enum_class._member_type_ is tuple: # special case for tuple enums - args = (args, ) # wrap it one more time - if not enum_class._use_args_: - enum_member = enum_class._new_member_(enum_class) - else: - enum_member = enum_class._new_member_(enum_class, *args) - if not hasattr(enum_member, '_value_'): - if enum_class._member_type_ is object: - enum_member._value_ = value - else: - try: - enum_member._value_ = enum_class._member_type_(*args) - except Exception as exc: - new_exc = TypeError( - '_value_ not set in __new__, unable to create it' - ) - new_exc.__cause__ = exc - raise new_exc - value = enum_member._value_ - enum_member._name_ = member_name - enum_member.__objclass__ = enum_class - enum_member.__init__(*args) - enum_member._sort_order_ = len(enum_class._member_names_) - - if Flag is not None and issubclass(enum_class, Flag): - enum_class._flag_mask_ |= value - if _is_single_bit(value): - enum_class._singles_mask_ |= value - enum_class._all_bits_ = 2 ** ((enum_class._flag_mask_).bit_length()) - 1 - - # If another member with the same value was already defined, the - # new member becomes an alias to the existing one. 
- try: - try: - # try to do a fast lookup to avoid the quadratic loop - enum_member = enum_class._value2member_map_[value] - except TypeError: - for name, canonical_member in enum_class._member_map_.items(): - if canonical_member._value_ == value: - enum_member = canonical_member - break - else: - raise KeyError - except KeyError: - # this could still be an alias if the value is multi-bit and the - # class is a flag class - if ( - Flag is None - or not issubclass(enum_class, Flag) - ): - # no other instances found, record this member in _member_names_ - enum_class._member_names_.append(member_name) - elif ( - Flag is not None - and issubclass(enum_class, Flag) - and _is_single_bit(value) - ): - # no other instances found, record this member in _member_names_ - enum_class._member_names_.append(member_name) - # if necessary, get redirect in place and then add it to _member_map_ - found_descriptor = None - descriptor_type = None - class_type = None - for base in enum_class.__mro__[1:]: - attr = base.__dict__.get(member_name) - if attr is not None: - if isinstance(attr, (property, DynamicClassAttribute)): - found_descriptor = attr - class_type = base - descriptor_type = 'enum' - break - elif _is_descriptor(attr): - found_descriptor = attr - descriptor_type = descriptor_type or 'desc' - class_type = class_type or base - continue - else: - descriptor_type = 'attr' - class_type = base - if found_descriptor: - redirect = property() - redirect.member = enum_member - redirect.__set_name__(enum_class, member_name) - if descriptor_type in ('enum','desc'): - # earlier descriptor found; copy fget, fset, fdel to this one. - redirect.fget = getattr(found_descriptor, 'fget', None) - redirect._get = getattr(found_descriptor, '__get__', None) - redirect.fset = getattr(found_descriptor, 'fset', None) - redirect._set = getattr(found_descriptor, '__set__', None) - redirect.fdel = getattr(found_descriptor, 'fdel', None) - redirect._del = getattr(found_descriptor, '__delete__', None) - redirect._attr_type = descriptor_type - redirect._cls_type = class_type - setattr(enum_class, member_name, redirect) - else: - setattr(enum_class, member_name, enum_member) - # now add to _member_map_ (even aliases) - enum_class._member_map_[member_name] = enum_member - try: - # This may fail if value is not hashable. We can't add the value - # to the map, and by-value lookups for this value will be - # linear. - enum_class._value2member_map_.setdefault(value, enum_member) - except TypeError: - # keep track of the value in a list so containment checks are quick - enum_class._unhashable_values_.append(value) - - -class _EnumDict(dict): - """ - Track enum member order and ensure member names are not reused. - - EnumType will use the names found in self._member_names as the - enumeration member names. - """ - def __init__(self): - super().__init__() - self._member_names = {} # use a dict to keep insertion order - self._last_values = [] - self._ignore = [] - self._auto_called = False - - def __setitem__(self, key, value): - """ - Changes anything not dundered or not a descriptor. - - If an enum member name is used twice, an error is raised; duplicate - values are not checked for. - - Single underscore (sunder) names are reserved. - """ - if _is_internal_class(self._cls_name, value): - import warnings - warnings.warn( - "In 3.13 classes created inside an enum will not become a member. 
" - "Use the `member` decorator to keep the current behavior.", - DeprecationWarning, - stacklevel=2, - ) - if _is_private(self._cls_name, key): - # also do nothing, name will be a normal attribute - pass - elif _is_sunder(key): - if key not in ( - '_order_', - '_generate_next_value_', '_numeric_repr_', '_missing_', '_ignore_', - '_iter_member_', '_iter_member_by_value_', '_iter_member_by_def_', - ): - raise ValueError( - '_sunder_ names, such as %r, are reserved for future Enum use' - % (key, ) - ) - if key == '_generate_next_value_': - # check if members already defined as auto() - if self._auto_called: - raise TypeError("_generate_next_value_ must be defined before members") - _gnv = value.__func__ if isinstance(value, staticmethod) else value - setattr(self, '_generate_next_value', _gnv) - elif key == '_ignore_': - if isinstance(value, str): - value = value.replace(',',' ').split() - else: - value = list(value) - self._ignore = value - already = set(value) & set(self._member_names) - if already: - raise ValueError( - '_ignore_ cannot specify already set names: %r' - % (already, ) - ) - elif _is_dunder(key): - if key == '__order__': - key = '_order_' - elif key in self._member_names: - # descriptor overwriting an enum? - raise TypeError('%r already defined as %r' % (key, self[key])) - elif key in self._ignore: - pass - elif isinstance(value, nonmember): - # unwrap value here; it won't be processed by the below `else` - value = value.value - elif _is_descriptor(value): - pass - # TODO: uncomment next three lines in 3.13 - # elif _is_internal_class(self._cls_name, value): - # # do nothing, name will be a normal attribute - # pass - else: - if key in self: - # enum overwriting a descriptor? - raise TypeError('%r already defined as %r' % (key, self[key])) - elif isinstance(value, member): - # unwrap value here -- it will become a member - value = value.value - non_auto_store = True - single = False - if isinstance(value, auto): - single = True - value = (value, ) - if type(value) is tuple and any(isinstance(v, auto) for v in value): - # insist on an actual tuple, no subclasses, in keeping with only supporting - # top-level auto() usage (not contained in any other data structure) - auto_valued = [] - for v in value: - if isinstance(v, auto): - non_auto_store = False - if v.value == _auto_null: - v.value = self._generate_next_value( - key, 1, len(self._member_names), self._last_values[:], - ) - self._auto_called = True - v = v.value - self._last_values.append(v) - auto_valued.append(v) - if single: - value = auto_valued[0] - else: - value = tuple(auto_valued) - self._member_names[key] = None - if non_auto_store: - self._last_values.append(value) - super().__setitem__(key, value) - - def update(self, members, **more_members): - try: - for name in members.keys(): - self[name] = members[name] - except AttributeError: - for name, value in members: - self[name] = value - for name, value in more_members.items(): - self[name] = value - - -class EnumType(type): - """ - Metaclass for Enum - """ - - @classmethod - def __prepare__(metacls, cls, bases, **kwds): - # check that previous enum members do not exist - metacls._check_for_existing_members_(cls, bases) - # create the namespace dict - enum_dict = _EnumDict() - enum_dict._cls_name = cls - # inherit previous flags and _generate_next_value_ function - member_type, first_enum = metacls._get_mixins_(cls, bases) - if first_enum is not None: - enum_dict['_generate_next_value_'] = getattr( - first_enum, '_generate_next_value_', None, - ) - return 
enum_dict - - def __new__(metacls, cls, bases, classdict, *, boundary=None, _simple=False, **kwds): - # an Enum class is final once enumeration items have been defined; it - # cannot be mixed with other types (int, float, etc.) if it has an - # inherited __new__ unless a new __new__ is defined (or the resulting - # class will fail). - # - if _simple: - return super().__new__(metacls, cls, bases, classdict, **kwds) - # - # remove any keys listed in _ignore_ - classdict.setdefault('_ignore_', []).append('_ignore_') - ignore = classdict['_ignore_'] - for key in ignore: - classdict.pop(key, None) - # - # grab member names - member_names = classdict._member_names - # - # check for illegal enum names (any others?) - invalid_names = set(member_names) & {'mro', ''} - if invalid_names: - raise ValueError('invalid enum member name(s) %s' % ( - ','.join(repr(n) for n in invalid_names) - )) - # - # adjust the sunders - _order_ = classdict.pop('_order_', None) - _gnv = classdict.get('_generate_next_value_') - if _gnv is not None and type(_gnv) is not staticmethod: - _gnv = staticmethod(_gnv) - # convert to normal dict - classdict = dict(classdict.items()) - if _gnv is not None: - classdict['_generate_next_value_'] = _gnv - # - # data type of member and the controlling Enum class - member_type, first_enum = metacls._get_mixins_(cls, bases) - __new__, save_new, use_args = metacls._find_new_( - classdict, member_type, first_enum, - ) - classdict['_new_member_'] = __new__ - classdict['_use_args_'] = use_args - # - # convert future enum members into temporary _proto_members - for name in member_names: - value = classdict[name] - classdict[name] = _proto_member(value) - # - # house-keeping structures - classdict['_member_names_'] = [] - classdict['_member_map_'] = {} - classdict['_value2member_map_'] = {} - classdict['_unhashable_values_'] = [] - classdict['_member_type_'] = member_type - # now set the __repr__ for the value - classdict['_value_repr_'] = metacls._find_data_repr_(cls, bases) - # - # Flag structures (will be removed if final class is not a Flag - classdict['_boundary_'] = ( - boundary - or getattr(first_enum, '_boundary_', None) - ) - classdict['_flag_mask_'] = 0 - classdict['_singles_mask_'] = 0 - classdict['_all_bits_'] = 0 - classdict['_inverted_'] = None - try: - exc = None - enum_class = super().__new__(metacls, cls, bases, classdict, **kwds) - except RuntimeError as e: - # any exceptions raised by member.__new__ will get converted to a - # RuntimeError, so get that original exception back and raise it instead - exc = e.__cause__ or e - if exc is not None: - raise exc - # - # update classdict with any changes made by __init_subclass__ - classdict.update(enum_class.__dict__) - # - # double check that repr and friends are not the mixin's or various - # things break (such as pickle) - # however, if the method is defined in the Enum itself, don't replace - # it - # - # Also, special handling for ReprEnum - if ReprEnum is not None and ReprEnum in bases: - if member_type is object: - raise TypeError( - 'ReprEnum subclasses must be mixed with a data type (i.e.' 
- ' int, str, float, etc.)' - ) - if '__format__' not in classdict: - enum_class.__format__ = member_type.__format__ - classdict['__format__'] = enum_class.__format__ - if '__str__' not in classdict: - method = member_type.__str__ - if method is object.__str__: - # if member_type does not define __str__, object.__str__ will use - # its __repr__ instead, so we'll also use its __repr__ - method = member_type.__repr__ - enum_class.__str__ = method - classdict['__str__'] = enum_class.__str__ - for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): - if name not in classdict: - # check for mixin overrides before replacing - enum_method = getattr(first_enum, name) - found_method = getattr(enum_class, name) - object_method = getattr(object, name) - data_type_method = getattr(member_type, name) - if found_method in (data_type_method, object_method): - setattr(enum_class, name, enum_method) - # - # for Flag, add __or__, __and__, __xor__, and __invert__ - if Flag is not None and issubclass(enum_class, Flag): - for name in ( - '__or__', '__and__', '__xor__', - '__ror__', '__rand__', '__rxor__', - '__invert__' - ): - if name not in classdict: - enum_method = getattr(Flag, name) - setattr(enum_class, name, enum_method) - classdict[name] = enum_method - # - # replace any other __new__ with our own (as long as Enum is not None, - # anyway) -- again, this is to support pickle - if Enum is not None: - # if the user defined their own __new__, save it before it gets - # clobbered in case they subclass later - if save_new: - enum_class.__new_member__ = __new__ - enum_class.__new__ = Enum.__new__ - # - # py3 support for definition order (helps keep py2/py3 code in sync) - # - # _order_ checking is spread out into three/four steps - # - if enum_class is a Flag: - # - remove any non-single-bit flags from _order_ - # - remove any aliases from _order_ - # - check that _order_ and _member_names_ match - # - # step 1: ensure we have a list - if _order_ is not None: - if isinstance(_order_, str): - _order_ = _order_.replace(',', ' ').split() - # - # remove Flag structures if final class is not a Flag - if ( - Flag is None and cls != 'Flag' - or Flag is not None and not issubclass(enum_class, Flag) - ): - delattr(enum_class, '_boundary_') - delattr(enum_class, '_flag_mask_') - delattr(enum_class, '_singles_mask_') - delattr(enum_class, '_all_bits_') - delattr(enum_class, '_inverted_') - elif Flag is not None and issubclass(enum_class, Flag): - # set correct __iter__ - member_list = [m._value_ for m in enum_class] - if member_list != sorted(member_list): - enum_class._iter_member_ = enum_class._iter_member_by_def_ - if _order_: - # _order_ step 2: remove any items from _order_ that are not single-bit - _order_ = [ - o - for o in _order_ - if o not in enum_class._member_map_ or _is_single_bit(enum_class[o]._value_) - ] - # - if _order_: - # _order_ step 3: remove aliases from _order_ - _order_ = [ - o - for o in _order_ - if ( - o not in enum_class._member_map_ - or - (o in enum_class._member_map_ and o in enum_class._member_names_) - )] - # _order_ step 4: verify that _order_ and _member_names_ match - if _order_ != enum_class._member_names_: - raise TypeError( - 'member order does not match _order_:\n %r\n %r' - % (enum_class._member_names_, _order_) - ) - # - return enum_class - - def __bool__(cls): - """ - classes/types should always be True. 
- """ - return True - - def __call__(cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None): - """ - Either returns an existing member, or creates a new enum class. - - This method is used both when an enum class is given a value to match - to an enumeration member (i.e. Color(3)) and for the functional API - (i.e. Color = Enum('Color', names='RED GREEN BLUE')). - - The value lookup branch is chosen if the enum is final. - - When used for the functional API: - - `value` will be the name of the new class. - - `names` should be either a string of white-space/comma delimited names - (values will start at `start`), or an iterator/mapping of name, value pairs. - - `module` should be set to the module this class is being created in; - if it is not set, an attempt to find that module will be made, but if - it fails the class will not be picklable. - - `qualname` should be set to the actual location this class can be found - at in its module; by default it is set to the global scope. If this is - not correct, unpickling will fail in some circumstances. - - `type`, if set, will be mixed in as the first base class. - """ - if cls._member_map_: - # simple value lookup if members exist - if names: - value = (value, names) + values - return cls.__new__(cls, value) - # otherwise, functional API: we're creating a new Enum type - if names is None and type is None: - # no body? no data-type? possibly wrong usage - raise TypeError( - f"{cls} has no members; specify `names=()` if you meant to create a new, empty, enum" - ) - return cls._create_( - class_name=value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - - def __contains__(cls, value): - """Return True if `value` is in `cls`. - - `value` is in `cls` if: - 1) `value` is a member of `cls`, or - 2) `value` is the value of one of the `cls`'s members. - """ - if isinstance(value, cls): - return True - return value in cls._value2member_map_ or value in cls._unhashable_values_ - - def __delattr__(cls, attr): - # nicer error message when someone tries to delete an attribute - # (see issue19025). - if attr in cls._member_map_: - raise AttributeError("%r cannot delete member %r." % (cls.__name__, attr)) - super().__delattr__(attr) - - def __dir__(cls): - interesting = set([ - '__class__', '__contains__', '__doc__', '__getitem__', - '__iter__', '__len__', '__members__', '__module__', - '__name__', '__qualname__', - ] - + cls._member_names_ - ) - if cls._new_member_ is not object.__new__: - interesting.add('__new__') - if cls.__init_subclass__ is not object.__init_subclass__: - interesting.add('__init_subclass__') - if cls._member_type_ is object: - return sorted(interesting) - else: - # return whatever mixed-in data type has - return sorted(set(dir(cls._member_type_)) | interesting) - - def __getitem__(cls, name): - """ - Return the member matching `name`. - """ - return cls._member_map_[name] - - def __iter__(cls): - """ - Return members in definition order. - """ - return (cls._member_map_[name] for name in cls._member_names_) - - def __len__(cls): - """ - Return the number of members (no aliases) - """ - return len(cls._member_names_) - - @bltns.property - def __members__(cls): - """ - Returns a mapping of member name->value. - - This mapping lists all enum members, including aliases. Note that this - is a read-only view of the internal mapping. 
- """ - return MappingProxyType(cls._member_map_) - - def __repr__(cls): - if Flag is not None and issubclass(cls, Flag): - return "" % cls.__name__ - else: - return "" % cls.__name__ - - def __reversed__(cls): - """ - Return members in reverse definition order. - """ - return (cls._member_map_[name] for name in reversed(cls._member_names_)) - - def __setattr__(cls, name, value): - """ - Block attempts to reassign Enum members. - - A simple assignment to the class namespace only changes one of the - several possible ways to get an Enum member from the Enum class, - resulting in an inconsistent Enumeration. - """ - member_map = cls.__dict__.get('_member_map_', {}) - if name in member_map: - raise AttributeError('cannot reassign member %r' % (name, )) - super().__setattr__(name, value) - - def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1, boundary=None): - """ - Convenience method to create a new Enum class. - - `names` can be: - - * A string containing member names, separated either with spaces or - commas. Values are incremented by 1 from `start`. - * An iterable of member names. Values are incremented by 1 from `start`. - * An iterable of (member name, value) pairs. - * A mapping of member name -> value pairs. - """ - metacls = cls.__class__ - bases = (cls, ) if type is None else (type, cls) - _, first_enum = cls._get_mixins_(class_name, bases) - classdict = metacls.__prepare__(class_name, bases) - - # special processing needed for names? - if isinstance(names, str): - names = names.replace(',', ' ').split() - if isinstance(names, (tuple, list)) and names and isinstance(names[0], str): - original_names, names = names, [] - last_values = [] - for count, name in enumerate(original_names): - value = first_enum._generate_next_value_(name, start, count, last_values[:]) - last_values.append(value) - names.append((name, value)) - if names is None: - names = () - - # Here, names is either an iterable of (name, value) or a mapping. - for item in names: - if isinstance(item, str): - member_name, member_value = item, names[item] - else: - member_name, member_value = item - classdict[member_name] = member_value - - if module is None: - try: - module = sys._getframemodulename(2) - except AttributeError: - # Fall back on _getframe if _getframemodulename is missing - try: - module = sys._getframe(2).f_globals['__name__'] - except (AttributeError, ValueError, KeyError): - pass - if module is None: - _make_class_unpicklable(classdict) - else: - classdict['__module__'] = module - if qualname is not None: - classdict['__qualname__'] = qualname - - return metacls.__new__(metacls, class_name, bases, classdict, boundary=boundary) - - def _convert_(cls, name, module, filter, source=None, *, boundary=None, as_global=False): - """ - Create a new Enum subclass that replaces a collection of global constants - """ - # convert all constants from source (or module) that pass filter() to - # a new Enum called name, and export the enum and its members back to - # module; - # also, replace the __reduce_ex__ method so unpickling works in - # previous Python versions - module_globals = sys.modules[module].__dict__ - if source: - source = source.__dict__ - else: - source = module_globals - # _value2member_map_ is populated in the same order every time - # for a consistent reverse mapping of number to name when there - # are multiple names for the same number. 
- members = [ - (name, value) - for name, value in source.items() - if filter(name)] - try: - # sort by value - members.sort(key=lambda t: (t[1], t[0])) - except TypeError: - # unless some values aren't comparable, in which case sort by name - members.sort(key=lambda t: t[0]) - body = {t[0]: t[1] for t in members} - body['__module__'] = module - tmp_cls = type(name, (object, ), body) - cls = _simple_enum(etype=cls, boundary=boundary or KEEP)(tmp_cls) - if as_global: - global_enum(cls) - else: - sys.modules[cls.__module__].__dict__.update(cls.__members__) - module_globals[name] = cls - return cls - - @classmethod - def _check_for_existing_members_(mcls, class_name, bases): - for chain in bases: - for base in chain.__mro__: - if isinstance(base, EnumType) and base._member_names_: - raise TypeError( - "<enum %r> cannot extend %r" - % (class_name, base) - ) - - @classmethod - def _get_mixins_(mcls, class_name, bases): - """ - Returns the type for creating enum members, and the first inherited - enum class. - - bases: the tuple of bases that was given to __new__ - """ - if not bases: - return object, Enum - # ensure final parent class is an Enum derivative, find any concrete - # data type, and check that Enum has no members - first_enum = bases[-1] - if not isinstance(first_enum, EnumType): - raise TypeError("new enumerations should be created as " - "`EnumName([mixin_type, ...] [data_type,] enum_type)`") - member_type = mcls._find_data_type_(class_name, bases) or object - return member_type, first_enum - - @classmethod - def _find_data_repr_(mcls, class_name, bases): - for chain in bases: - for base in chain.__mro__: - if base is object: - continue - elif isinstance(base, EnumType): - # if we hit an Enum, use it's _value_repr_ - return base._value_repr_ - elif '__repr__' in base.__dict__: - # this is our data repr - # double-check if a dataclass with a default __repr__ - if ( - '__dataclass_fields__' in base.__dict__ - and '__dataclass_params__' in base.__dict__ - and base.__dict__['__dataclass_params__'].repr - ): - return _dataclass_repr - else: - return base.__dict__['__repr__'] - return None - - @classmethod - def _find_data_type_(mcls, class_name, bases): - # a datatype has a __new__ method, or a __dataclass_fields__ attribute - data_types = set() - base_chain = set() - for chain in bases: - candidate = None - for base in chain.__mro__: - base_chain.add(base) - if base is object: - continue - elif isinstance(base, EnumType): - if base._member_type_ is not object: - data_types.add(base._member_type_) - break - elif '__new__' in base.__dict__ or '__dataclass_fields__' in base.__dict__: - data_types.add(candidate or base) - break - else: - candidate = candidate or base - if len(data_types) > 1: - raise TypeError('too many data types for %r: %r' % (class_name, data_types)) - elif data_types: - return data_types.pop() - else: - return None - - @classmethod - def _find_new_(mcls, classdict, member_type, first_enum): - """ - Returns the __new__ to be used for creating the enum members. - - classdict: the class dictionary given to __new__ - member_type: the data type whose __new__ will be used by default - first_enum: enumeration to check for an overriding __new__ - """ - # now find the correct __new__, checking to see of one was defined - # by the user; also check earlier enum classes in case a __new__ was - # saved as __new_member__ - __new__ = classdict.get('__new__', None) - - # should __new__ be saved as __new_member__ later?
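[The __new__/__new_member__ bookkeeping that _find_new_ performs here is what lets a mixed-in data type construct member values. A sketch of the user-facing pattern with a hypothetical Mass enum, not part of the deleted file:]

    from enum import Enum

    class Mass(float, Enum):
        # A user-defined __new__ is found by _find_new_ and saved as
        # __new_member__; every member value is built through it.
        def __new__(cls, kg):
            obj = float.__new__(cls, kg)
            obj._value_ = kg
            return obj
        ELECTRON = 9.109e-31
        PROTON = 1.673e-27

    assert Mass.PROTON > Mass.ELECTRON    # members behave as floats
    assert hasattr(Mass, '__new_member__')  # the saved custom __new__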
- save_new = first_enum is not None and __new__ is not None - - if __new__ is None: - # check all possibles for __new_member__ before falling back to - # __new__ - for method in ('__new_member__', '__new__'): - for possible in (member_type, first_enum): - target = getattr(possible, method, None) - if target not in { - None, - None.__new__, - object.__new__, - Enum.__new__, - }: - __new__ = target - break - if __new__ is not None: - break - else: - __new__ = object.__new__ - - # if a non-object.__new__ is used then whatever value/tuple was - # assigned to the enum member name will be passed to __new__ and to the - # new enum member's __init__ - if first_enum is None or __new__ in (Enum.__new__, object.__new__): - use_args = False - else: - use_args = True - return __new__, save_new, use_args -EnumMeta = EnumType - - -class Enum(metaclass=EnumType): - """ - Create a collection of name/value pairs. - - Example enumeration: - - >>> class Color(Enum): - ... RED = 1 - ... BLUE = 2 - ... GREEN = 3 - - Access them by: - - - attribute access: - - >>> Color.RED - <Color.RED: 1> - - - value lookup: - - >>> Color(1) - <Color.RED: 1> - - - name lookup: - - >>> Color['RED'] - <Color.RED: 1> - - Enumerations can be iterated over, and know how many members they have: - - >>> len(Color) - 3 - - >>> list(Color) - [<Color.RED: 1>, <Color.BLUE: 2>, <Color.GREEN: 3>] - - Methods can be added to enumerations, and members can have their own - attributes -- see the documentation for details. - """ - - @classmethod - def __signature__(cls): - if cls._member_names_: - return '(*values)' - else: - return '(new_class_name, /, names, *, module=None, qualname=None, type=None, start=1, boundary=None)' - - def __new__(cls, value): - # all enum instances are actually created during class construction - # without calling this method; this method is called by the metaclass' - # __call__ (i.e. Color(3) ), and by pickle - if type(value) is cls: - # For lookups like Color(Color.RED) - return value - # by-value search for a matching enum member - # see if it's in the reverse mapping (for hashable values) - try: - return cls._value2member_map_[value] - except KeyError: - # Not found, no need to do long O(n) search - pass - except TypeError: - # not there, now do long search -- O(n) behavior - for member in cls._member_map_.values(): - if member._value_ == value: - return member - # still not found -- verify that members exist, in-case somebody got here mistakenly - # (such as via super when trying to override __new__) - if not cls._member_map_: - raise TypeError("%r has no members defined" % cls) - # - # still not found -- try _missing_ hook - try: - exc = None - result = cls._missing_(value) - except Exception as e: - exc = e - result = None - try: - if isinstance(result, cls): - return result - elif ( - Flag is not None and issubclass(cls, Flag) - and cls._boundary_ is EJECT and isinstance(result, int) - ): - return result - else: - ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__)) - if result is None and exc is None: - raise ve_exc - elif exc is None: - exc = TypeError( - 'error in %s._missing_: returned %r instead of None or a valid member' - % (cls.__name__, result) - ) - if not isinstance(exc, ValueError): - exc.__context__ = ve_exc - raise exc - finally: - # ensure all variables that could hold an exception are destroyed - exc = None - ve_exc = None - - def __init__(self, *args, **kwds): - pass - - @staticmethod - def _generate_next_value_(name, start, count, last_values): - """ - Generate the next value when not given.
- - name: the name of the member - start: the initial start value or None - count: the number of existing members - last_values: the list of values assigned - """ - if not last_values: - return start - try: - last = last_values[-1] - last_values.sort() - if last == last_values[-1]: - # no difference between old and new methods - return last + 1 - else: - # trigger old method (with warning) - raise TypeError - except TypeError: - import warnings - warnings.warn( - "In 3.13 the default `auto()`/`_generate_next_value_` will require all values to be sortable and support adding +1\n" - "and the value returned will be the largest value in the enum incremented by 1", - DeprecationWarning, - stacklevel=3, - ) - for v in reversed(last_values): - try: - return v + 1 - except TypeError: - pass - return start - - @classmethod - def _missing_(cls, value): - return None - - def __repr__(self): - v_repr = self.__class__._value_repr_ or repr - return "<%s.%s: %s>" % (self.__class__.__name__, self._name_, v_repr(self._value_)) - - def __str__(self): - return "%s.%s" % (self.__class__.__name__, self._name_, ) - - def __dir__(self): - """ - Returns all members and all public methods - """ - if self.__class__._member_type_ is object: - interesting = set(['__class__', '__doc__', '__eq__', '__hash__', '__module__', 'name', 'value']) - else: - interesting = set(object.__dir__(self)) - for name in getattr(self, '__dict__', []): - if name[0] != '_': - interesting.add(name) - for cls in self.__class__.mro(): - for name, obj in cls.__dict__.items(): - if name[0] == '_': - continue - if isinstance(obj, property): - # that's an enum.property - if obj.fget is not None or name not in self._member_map_: - interesting.add(name) - else: - # in case it was added by `dir(self)` - interesting.discard(name) - else: - interesting.add(name) - names = sorted( - set(['__class__', '__doc__', '__eq__', '__hash__', '__module__']) - | interesting - ) - return names - - def __format__(self, format_spec): - return str.__format__(str(self), format_spec) - - def __hash__(self): - return hash(self._name_) - - def __reduce_ex__(self, proto): - return self.__class__, (self._value_, ) - - def __deepcopy__(self,memo): - return self - - def __copy__(self): - return self - - # enum.property is used to provide access to the `name` and - # `value` attributes of enum members while keeping some measure of - # protection from modification, while still allowing for an enumeration - # to have members named `name` and `value`. This works because each - # instance of enum.property saves its companion member, which it returns - # on class lookup; on instance lookup it either executes a provided function - # or raises an AttributeError. - - @property - def name(self): - """The name of the Enum member.""" - return self._name_ - - @property - def value(self): - """The value of the Enum member.""" - return self._value_ - - -class ReprEnum(Enum): - """ - Only changes the repr(), leaving str() and format() to the mixed-in type. 
- """ - - -class IntEnum(int, ReprEnum): - """ - Enum where members are also (and must be) ints - """ - - -class StrEnum(str, ReprEnum): - """ - Enum where members are also (and must be) strings - """ - - def __new__(cls, *values): - "values must already be of type `str`" - if len(values) > 3: - raise TypeError('too many arguments for str(): %r' % (values, )) - if len(values) == 1: - # it must be a string - if not isinstance(values[0], str): - raise TypeError('%r is not a string' % (values[0], )) - if len(values) >= 2: - # check that encoding argument is a string - if not isinstance(values[1], str): - raise TypeError('encoding must be a string, not %r' % (values[1], )) - if len(values) == 3: - # check that errors argument is a string - if not isinstance(values[2], str): - raise TypeError('errors must be a string, not %r' % (values[2])) - value = str(*values) - member = str.__new__(cls, value) - member._value_ = value - return member - - @staticmethod - def _generate_next_value_(name, start, count, last_values): - """ - Return the lower-cased version of the member name. - """ - return name.lower() - - -def pickle_by_global_name(self, proto): - # should not be used with Flag-type enums - return self.name -_reduce_ex_by_global_name = pickle_by_global_name - -def pickle_by_enum_name(self, proto): - # should not be used with Flag-type enums - return getattr, (self.__class__, self._name_) - -class FlagBoundary(StrEnum): - """ - control how out of range values are handled - "strict" -> error is raised [default for Flag] - "conform" -> extra bits are discarded - "eject" -> lose flag status - "keep" -> keep flag status and all bits [default for IntFlag] - """ - STRICT = auto() - CONFORM = auto() - EJECT = auto() - KEEP = auto() -STRICT, CONFORM, EJECT, KEEP = FlagBoundary - - -class Flag(Enum, boundary=STRICT): - """ - Support for flags - """ - - _numeric_repr_ = repr - - @staticmethod - def _generate_next_value_(name, start, count, last_values): - """ - Generate the next value when not given. - - name: the name of the member - start: the initial start value or None - count: the number of existing members - last_values: the last value assigned or None - """ - if not count: - return start if start is not None else 1 - last_value = max(last_values) - try: - high_bit = _high_bit(last_value) - except Exception: - raise TypeError('invalid flag value %r' % last_value) from None - return 2 ** (high_bit+1) - - @classmethod - def _iter_member_by_value_(cls, value): - """ - Extract all members from the value in definition (i.e. increasing value) order. - """ - for val in _iter_bits_lsb(value & cls._flag_mask_): - yield cls._value2member_map_.get(val) - - _iter_member_ = _iter_member_by_value_ - - @classmethod - def _iter_member_by_def_(cls, value): - """ - Extract all members from the value in definition order. - """ - yield from sorted( - cls._iter_member_by_value_(value), - key=lambda m: m._sort_order_, - ) - - @classmethod - def _missing_(cls, value): - """ - Create a composite member containing all canonical members present in `value`. - - If non-member values are present, result depends on `_boundary_` setting. - """ - if not isinstance(value, int): - raise ValueError( - "%r is not a valid %s" % (value, cls.__qualname__) - ) - # check boundaries - # - value must be in range (e.g. -16 <-> +15, i.e. ~15 <-> 15) - # - value must not include any skipped flags (e.g. 
if bit 2 is not - # defined, then 0d10 is invalid) - flag_mask = cls._flag_mask_ - singles_mask = cls._singles_mask_ - all_bits = cls._all_bits_ - neg_value = None - if ( - not ~all_bits <= value <= all_bits - or value & (all_bits ^ flag_mask) - ): - if cls._boundary_ is STRICT: - max_bits = max(value.bit_length(), flag_mask.bit_length()) - raise ValueError( - "%r invalid value %r\n given %s\n allowed %s" % ( - cls, value, bin(value, max_bits), bin(flag_mask, max_bits), - )) - elif cls._boundary_ is CONFORM: - value = value & flag_mask - elif cls._boundary_ is EJECT: - return value - elif cls._boundary_ is KEEP: - if value < 0: - value = ( - max(all_bits+1, 2**(value.bit_length())) - + value - ) - else: - raise ValueError( - '%r unknown flag boundary %r' % (cls, cls._boundary_, ) - ) - if value < 0: - neg_value = value - value = all_bits + 1 + value - # get members and unknown - unknown = value & ~flag_mask - aliases = value & ~singles_mask - member_value = value & singles_mask - if unknown and cls._boundary_ is not KEEP: - raise ValueError( - '%s(%r) --> unknown values %r [%s]' - % (cls.__name__, value, unknown, bin(unknown)) - ) - # normal Flag? - if cls._member_type_ is object: - # construct a singleton enum pseudo-member - pseudo_member = object.__new__(cls) - else: - pseudo_member = cls._member_type_.__new__(cls, value) - if not hasattr(pseudo_member, '_value_'): - pseudo_member._value_ = value - if member_value or aliases: - members = [] - combined_value = 0 - for m in cls._iter_member_(member_value): - members.append(m) - combined_value |= m._value_ - if aliases: - value = member_value | aliases - for n, pm in cls._member_map_.items(): - if pm not in members and pm._value_ and pm._value_ & value == pm._value_: - members.append(pm) - combined_value |= pm._value_ - unknown = value ^ combined_value - pseudo_member._name_ = '|'.join([m._name_ for m in members]) - if not combined_value: - pseudo_member._name_ = None - elif unknown and cls._boundary_ is STRICT: - raise ValueError('%r: no members with value %r' % (cls, unknown)) - elif unknown: - pseudo_member._name_ += '|%s' % cls._numeric_repr_(unknown) - else: - pseudo_member._name_ = None - # use setdefault in case another thread already created a composite - # with this value - # note: zero is a special case -- always add it - pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) - if neg_value is not None: - cls._value2member_map_[neg_value] = pseudo_member - return pseudo_member - - def __contains__(self, other): - """ - Returns True if self has at least the same flags set as other. - """ - if not isinstance(other, self.__class__): - raise TypeError( - "unsupported operand type(s) for 'in': %r and %r" % ( - type(other).__qualname__, self.__class__.__qualname__)) - return other._value_ & self._value_ == other._value_ - - def __iter__(self): - """ - Returns flags in definition order. 
- """ - yield from self._iter_member_(self._value_) - - def __len__(self): - return self._value_.bit_count() - - def __repr__(self): - cls_name = self.__class__.__name__ - v_repr = self.__class__._value_repr_ or repr - if self._name_ is None: - return "<%s: %s>" % (cls_name, v_repr(self._value_)) - else: - return "<%s.%s: %s>" % (cls_name, self._name_, v_repr(self._value_)) - - def __str__(self): - cls_name = self.__class__.__name__ - if self._name_ is None: - return '%s(%r)' % (cls_name, self._value_) - else: - return "%s.%s" % (cls_name, self._name_) - - def __bool__(self): - return bool(self._value_) - - def __or__(self, other): - if isinstance(other, self.__class__): - other = other._value_ - elif self._member_type_ is not object and isinstance(other, self._member_type_): - other = other - else: - return NotImplemented - value = self._value_ - return self.__class__(value | other) - - def __and__(self, other): - if isinstance(other, self.__class__): - other = other._value_ - elif self._member_type_ is not object and isinstance(other, self._member_type_): - other = other - else: - return NotImplemented - value = self._value_ - return self.__class__(value & other) - - def __xor__(self, other): - if isinstance(other, self.__class__): - other = other._value_ - elif self._member_type_ is not object and isinstance(other, self._member_type_): - other = other - else: - return NotImplemented - value = self._value_ - return self.__class__(value ^ other) - - def __invert__(self): - if self._inverted_ is None: - if self._boundary_ in (EJECT, KEEP): - self._inverted_ = self.__class__(~self._value_) - else: - self._inverted_ = self.__class__(self._singles_mask_ & ~self._value_) - return self._inverted_ - - __rand__ = __and__ - __ror__ = __or__ - __rxor__ = __xor__ - - -class IntFlag(int, ReprEnum, Flag, boundary=KEEP): - """ - Support for integer-based Flags - """ - - -def _high_bit(value): - """ - returns index of highest bit, or -1 if value is zero or negative - """ - return value.bit_length() - 1 - -def unique(enumeration): - """ - Class decorator for enumerations ensuring unique member values. 
- """ - duplicates = [] - for name, member in enumeration.__members__.items(): - if name != member.name: - duplicates.append((name, member.name)) - if duplicates: - alias_details = ', '.join( - ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) - raise ValueError('duplicate values found in %r: %s' % - (enumeration, alias_details)) - return enumeration - -def _dataclass_repr(self): - dcf = self.__dataclass_fields__ - return ', '.join( - '%s=%r' % (k, getattr(self, k)) - for k in dcf.keys() - if dcf[k].repr - ) - -def global_enum_repr(self): - """ - use module.enum_name instead of class.enum_name - - the module is the last module in case of a multi-module name - """ - module = self.__class__.__module__.split('.')[-1] - return '%s.%s' % (module, self._name_) - -def global_flag_repr(self): - """ - use module.flag_name instead of class.flag_name - - the module is the last module in case of a multi-module name - """ - module = self.__class__.__module__.split('.')[-1] - cls_name = self.__class__.__name__ - if self._name_ is None: - return "%s.%s(%r)" % (module, cls_name, self._value_) - if _is_single_bit(self): - return '%s.%s' % (module, self._name_) - if self._boundary_ is not FlagBoundary.KEEP: - return '|'.join(['%s.%s' % (module, name) for name in self.name.split('|')]) - else: - name = [] - for n in self._name_.split('|'): - if n[0].isdigit(): - name.append(n) - else: - name.append('%s.%s' % (module, n)) - return '|'.join(name) - -def global_str(self): - """ - use enum_name instead of class.enum_name - """ - if self._name_ is None: - cls_name = self.__class__.__name__ - return "%s(%r)" % (cls_name, self._value_) - else: - return self._name_ - -def global_enum(cls, update_str=False): - """ - decorator that makes the repr() of an enum member reference its module - instead of its class; also exports all members to the enum's module's - global namespace - """ - if issubclass(cls, Flag): - cls.__repr__ = global_flag_repr - else: - cls.__repr__ = global_enum_repr - if not issubclass(cls, ReprEnum) or update_str: - cls.__str__ = global_str - sys.modules[cls.__module__].__dict__.update(cls.__members__) - return cls - -def _simple_enum(etype=Enum, *, boundary=None, use_args=None): - """ - Class decorator that converts a normal class into an :class:`Enum`. No - safety checks are done, and some advanced behavior (such as - :func:`__init_subclass__`) is not available. Enum creation can be faster - using :func:`simple_enum`. - - >>> from enum import Enum, _simple_enum - >>> @_simple_enum(Enum) - ... class Color: - ... RED = auto() - ... GREEN = auto() - ... 
BLUE = auto() - >>> Color - <enum 'Color'> - """ - def convert_class(cls): - nonlocal use_args - cls_name = cls.__name__ - if use_args is None: - use_args = etype._use_args_ - __new__ = cls.__dict__.get('__new__') - if __new__ is not None: - new_member = __new__.__func__ - else: - new_member = etype._member_type_.__new__ - attrs = {} - body = {} - if __new__ is not None: - body['__new_member__'] = new_member - body['_new_member_'] = new_member - body['_use_args_'] = use_args - body['_generate_next_value_'] = gnv = etype._generate_next_value_ - body['_member_names_'] = member_names = [] - body['_member_map_'] = member_map = {} - body['_value2member_map_'] = value2member_map = {} - body['_unhashable_values_'] = [] - body['_member_type_'] = member_type = etype._member_type_ - body['_value_repr_'] = etype._value_repr_ - if issubclass(etype, Flag): - body['_boundary_'] = boundary or etype._boundary_ - body['_flag_mask_'] = None - body['_all_bits_'] = None - body['_singles_mask_'] = None - body['_inverted_'] = None - body['__or__'] = Flag.__or__ - body['__xor__'] = Flag.__xor__ - body['__and__'] = Flag.__and__ - body['__ror__'] = Flag.__ror__ - body['__rxor__'] = Flag.__rxor__ - body['__rand__'] = Flag.__rand__ - body['__invert__'] = Flag.__invert__ - for name, obj in cls.__dict__.items(): - if name in ('__dict__', '__weakref__'): - continue - if _is_dunder(name) or _is_private(cls_name, name) or _is_sunder(name) or _is_descriptor(obj): - body[name] = obj - else: - attrs[name] = obj - if cls.__dict__.get('__doc__') is None: - body['__doc__'] = 'An enumeration.' - # - # double check that repr and friends are not the mixin's or various - # things break (such as pickle) - # however, if the method is defined in the Enum itself, don't replace - # it - enum_class = type(cls_name, (etype, ), body, boundary=boundary, _simple=True) - for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): - if name not in body: - # check for mixin overrides before replacing - enum_method = getattr(etype, name) - found_method = getattr(enum_class, name) - object_method = getattr(object, name) - data_type_method = getattr(member_type, name) - if found_method in (data_type_method, object_method): - setattr(enum_class, name, enum_method) - gnv_last_values = [] - if issubclass(enum_class, Flag): - # Flag / IntFlag - single_bits = multi_bits = 0 - for name, value in attrs.items(): - if isinstance(value, auto) and auto.value is _auto_null: - value = gnv(name, 1, len(member_names), gnv_last_values) - if value in value2member_map: - # an alias to an existing member - member = value2member_map[value] - redirect = property() - redirect.member = member - redirect.__set_name__(enum_class, name) - setattr(enum_class, name, redirect) - member_map[name] = member - else: - # create the member - if use_args: - if not isinstance(value, tuple): - value = (value, ) - member = new_member(enum_class, *value) - value = value[0] - else: - member = new_member(enum_class) - if __new__ is None: - member._value_ = value - member._name_ = name - member.__objclass__ = enum_class - member.__init__(value) - redirect = property() - redirect.member = member - redirect.__set_name__(enum_class, name) - setattr(enum_class, name, redirect) - member_map[name] = member - member._sort_order_ = len(member_names) - value2member_map[value] = member - if _is_single_bit(value): - # not a multi-bit alias, record in _member_names_ and _flag_mask_ - member_names.append(name) - single_bits |= value - else: - multi_bits |= value - gnv_last_values.append(value) - 
enum_class._flag_mask_ = single_bits | multi_bits - enum_class._singles_mask_ = single_bits - enum_class._all_bits_ = 2 ** ((single_bits|multi_bits).bit_length()) - 1 - # set correct __iter__ - member_list = [m._value_ for m in enum_class] - if member_list != sorted(member_list): - enum_class._iter_member_ = enum_class._iter_member_by_def_ - else: - # Enum / IntEnum / StrEnum - for name, value in attrs.items(): - if isinstance(value, auto): - if value.value is _auto_null: - value.value = gnv(name, 1, len(member_names), gnv_last_values) - value = value.value - if value in value2member_map: - # an alias to an existing member - member = value2member_map[value] - redirect = property() - redirect.member = member - redirect.__set_name__(enum_class, name) - setattr(enum_class, name, redirect) - member_map[name] = member - else: - # create the member - if use_args: - if not isinstance(value, tuple): - value = (value, ) - member = new_member(enum_class, *value) - value = value[0] - else: - member = new_member(enum_class) - if __new__ is None: - member._value_ = value - member._name_ = name - member.__objclass__ = enum_class - member.__init__(value) - member._sort_order_ = len(member_names) - redirect = property() - redirect.member = member - redirect.__set_name__(enum_class, name) - setattr(enum_class, name, redirect) - member_map[name] = member - value2member_map[value] = member - member_names.append(name) - gnv_last_values.append(value) - if '__new__' in body: - enum_class.__new_member__ = enum_class.__new__ - enum_class.__new__ = Enum.__new__ - return enum_class - return convert_class - -@_simple_enum(StrEnum) -class EnumCheck: - """ - various conditions to check an enumeration for - """ - CONTINUOUS = "no skipped integer values" - NAMED_FLAGS = "multi-flag aliases may not contain unnamed flags" - UNIQUE = "one name per value" -CONTINUOUS, NAMED_FLAGS, UNIQUE = EnumCheck - - -class verify: - """ - Check an enumeration for various constraints. 
(see EnumCheck) - """ - def __init__(self, *checks): - self.checks = checks - def __call__(self, enumeration): - checks = self.checks - cls_name = enumeration.__name__ - if Flag is not None and issubclass(enumeration, Flag): - enum_type = 'flag' - elif issubclass(enumeration, Enum): - enum_type = 'enum' - else: - raise TypeError("the 'verify' decorator only works with Enum and Flag") - for check in checks: - if check is UNIQUE: - # check for duplicate names - duplicates = [] - for name, member in enumeration.__members__.items(): - if name != member.name: - duplicates.append((name, member.name)) - if duplicates: - alias_details = ', '.join( - ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) - raise ValueError('aliases found in %r: %s' % - (enumeration, alias_details)) - elif check is CONTINUOUS: - values = set(e.value for e in enumeration) - if len(values) < 2: - continue - low, high = min(values), max(values) - missing = [] - if enum_type == 'flag': - # check for powers of two - for i in range(_high_bit(low)+1, _high_bit(high)): - if 2**i not in values: - missing.append(2**i) - elif enum_type == 'enum': - # check for powers of one - for i in range(low+1, high): - if i not in values: - missing.append(i) - else: - raise Exception('verify: unknown type %r' % enum_type) - if missing: - raise ValueError(('invalid %s %r: missing values %s' % ( - enum_type, cls_name, ', '.join((str(m) for m in missing))) - )[:256]) - # limit max length to protect against DOS attacks - elif check is NAMED_FLAGS: - # examine each alias and check for unnamed flags - member_names = enumeration._member_names_ - member_values = [m.value for m in enumeration] - missing_names = [] - missing_value = 0 - for name, alias in enumeration._member_map_.items(): - if name in member_names: - # not an alias - continue - if alias.value < 0: - # negative numbers are not checked - continue - values = list(_iter_bits_lsb(alias.value)) - missed = [v for v in values if v not in member_values] - if missed: - missing_names.append(name) - missing_value |= reduce(_or_, missed) - if missing_names: - if len(missing_names) == 1: - alias = 'alias %s is missing' % missing_names[0] - else: - alias = 'aliases %s and %s are missing' % ( - ', '.join(missing_names[:-1]), missing_names[-1] - ) - if _is_single_bit(missing_value): - value = 'value 0x%x' % missing_value - else: - value = 'combined values of 0x%x' % missing_value - raise ValueError( - 'invalid Flag %r: %s %s [use enum.show_flag_values(value) for details]' - % (cls_name, alias, value) - ) - return enumeration - -def _test_simple_enum(checked_enum, simple_enum): - """ - A function that can be used to test an enum created with :func:`_simple_enum` - against the version created by subclassing :class:`Enum`:: - - >>> from enum import Enum, _simple_enum, _test_simple_enum - >>> @_simple_enum(Enum) - ... class Color: - ... RED = auto() - ... GREEN = auto() - ... BLUE = auto() - >>> class CheckedColor(Enum): - ... RED = auto() - ... GREEN = auto() - ... BLUE = auto() - ... # TODO: RUSTPYTHON - >>> _test_simple_enum(CheckedColor, Color) # doctest: +SKIP - - If differences are found, a :exc:`TypeError` is raised. 
- """ - failed = [] - if checked_enum.__dict__ != simple_enum.__dict__: - checked_dict = checked_enum.__dict__ - checked_keys = list(checked_dict.keys()) - simple_dict = simple_enum.__dict__ - simple_keys = list(simple_dict.keys()) - member_names = set( - list(checked_enum._member_map_.keys()) - + list(simple_enum._member_map_.keys()) - ) - for key in set(checked_keys + simple_keys): - if key in ('__module__', '_member_map_', '_value2member_map_', '__doc__'): - # keys known to be different, or very long - continue - elif key in member_names: - # members are checked below - continue - elif key not in simple_keys: - failed.append("missing key: %r" % (key, )) - elif key not in checked_keys: - failed.append("extra key: %r" % (key, )) - else: - checked_value = checked_dict[key] - simple_value = simple_dict[key] - if callable(checked_value) or isinstance(checked_value, bltns.property): - continue - if key == '__doc__': - # remove all spaces/tabs - compressed_checked_value = checked_value.replace(' ','').replace('\t','') - compressed_simple_value = simple_value.replace(' ','').replace('\t','') - if compressed_checked_value != compressed_simple_value: - failed.append("%r:\n %s\n %s" % ( - key, - "checked -> %r" % (checked_value, ), - "simple -> %r" % (simple_value, ), - )) - elif checked_value != simple_value: - failed.append("%r:\n %s\n %s" % ( - key, - "checked -> %r" % (checked_value, ), - "simple -> %r" % (simple_value, ), - )) - failed.sort() - for name in member_names: - failed_member = [] - if name not in simple_keys: - failed.append('missing member from simple enum: %r' % name) - elif name not in checked_keys: - failed.append('extra member in simple enum: %r' % name) - else: - checked_member_dict = checked_enum[name].__dict__ - checked_member_keys = list(checked_member_dict.keys()) - simple_member_dict = simple_enum[name].__dict__ - simple_member_keys = list(simple_member_dict.keys()) - for key in set(checked_member_keys + simple_member_keys): - if key in ('__module__', '__objclass__', '_inverted_'): - # keys known to be different or absent - continue - elif key not in simple_member_keys: - failed_member.append("missing key %r not in the simple enum member %r" % (key, name)) - elif key not in checked_member_keys: - failed_member.append("extra key %r in simple enum member %r" % (key, name)) - else: - checked_value = checked_member_dict[key] - simple_value = simple_member_dict[key] - if checked_value != simple_value: - failed_member.append("%r:\n %s\n %s" % ( - key, - "checked member -> %r" % (checked_value, ), - "simple member -> %r" % (simple_value, ), - )) - if failed_member: - failed.append('%r member mismatch:\n %s' % ( - name, '\n '.join(failed_member), - )) - for method in ( - '__str__', '__repr__', '__reduce_ex__', '__format__', - '__getnewargs_ex__', '__getnewargs__', '__reduce_ex__', '__reduce__' - ): - if method in simple_keys and method in checked_keys: - # cannot compare functions, and it exists in both, so we're good - continue - elif method not in simple_keys and method not in checked_keys: - # method is inherited -- check it out - checked_method = getattr(checked_enum, method, None) - simple_method = getattr(simple_enum, method, None) - if hasattr(checked_method, '__func__'): - checked_method = checked_method.__func__ - simple_method = simple_method.__func__ - if checked_method != simple_method: - failed.append("%r: %-30s %s" % ( - method, - "checked -> %r" % (checked_method, ), - "simple -> %r" % (simple_method, ), - )) - else: - # if the method existed in only one of the 
enums, it will have been caught - # in the first checks above - pass - if failed: - raise TypeError('enum mismatch:\n %s' % '\n '.join(failed)) - -def _old_convert_(etype, name, module, filter, source=None, *, boundary=None): - """ - Create a new Enum subclass that replaces a collection of global constants - """ - # convert all constants from source (or module) that pass filter() to - # a new Enum called name, and export the enum and its members back to - # module; - # also, replace the __reduce_ex__ method so unpickling works in - # previous Python versions - module_globals = sys.modules[module].__dict__ - if source: - source = source.__dict__ - else: - source = module_globals - # _value2member_map_ is populated in the same order every time - # for a consistent reverse mapping of number to name when there - # are multiple names for the same number. - members = [ - (name, value) - for name, value in source.items() - if filter(name)] - try: - # sort by value - members.sort(key=lambda t: (t[1], t[0])) - except TypeError: - # unless some values aren't comparable, in which case sort by name - members.sort(key=lambda t: t[0]) - cls = etype(name, members, module=module, boundary=boundary or KEEP) - return cls - -_stdlib_enums = IntEnum, StrEnum, IntFlag diff --git a/controllers/guidance_ctrl/Lib/functools.py b/controllers/guidance_ctrl/Lib/functools.py deleted file mode 100644 index 2ae4290f..00000000 --- a/controllers/guidance_ctrl/Lib/functools.py +++ /dev/null @@ -1,1006 +0,0 @@ -"""functools.py - Tools for working with functions and callable objects -""" -# Python module wrapper for _functools C module -# to allow utilities written in Python to be added -# to the functools module. -# Written by Nick Coghlan <ncoghlan at gmail.com>, -# Raymond Hettinger <python at rcn.com>, -# and Łukasz Langa <lukasz at langa.pl>. -# Copyright (C) 2006-2013 Python Software Foundation. 
-# See C source code for _functools credits/copyright - -__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', - 'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce', - 'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod', - 'cached_property'] - -from abc import get_cache_token -from collections import namedtuple -# import types, weakref # Deferred to single_dispatch() -from reprlib import recursive_repr -from _thread import RLock -from types import GenericAlias - - -################################################################################ -### update_wrapper() and wraps() decorator -################################################################################ - -# update_wrapper() and wraps() are tools to help write -# wrapper functions that can handle naive introspection - -WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', - '__annotations__', '__type_params__') -WRAPPER_UPDATES = ('__dict__',) -def update_wrapper(wrapper, - wrapped, - assigned = WRAPPER_ASSIGNMENTS, - updated = WRAPPER_UPDATES): - """Update a wrapper function to look like the wrapped function - - wrapper is the function to be updated - wrapped is the original function - assigned is a tuple naming the attributes assigned directly - from the wrapped function to the wrapper function (defaults to - functools.WRAPPER_ASSIGNMENTS) - updated is a tuple naming the attributes of the wrapper that - are updated with the corresponding attribute from the wrapped - function (defaults to functools.WRAPPER_UPDATES) - """ - for attr in assigned: - try: - value = getattr(wrapped, attr) - except AttributeError: - pass - else: - setattr(wrapper, attr, value) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, {})) - # Issue #17482: set __wrapped__ last so we don't inadvertently copy it - # from the wrapped function when updating __dict__ - wrapper.__wrapped__ = wrapped - # Return the wrapper so this can be used as a decorator via partial() - return wrapper - -def wraps(wrapped, - assigned = WRAPPER_ASSIGNMENTS, - updated = WRAPPER_UPDATES): - """Decorator factory to apply update_wrapper() to a wrapper function - - Returns a decorator that invokes update_wrapper() with the decorated - function as the wrapper argument and the arguments to wraps() as the - remaining arguments. Default arguments are as for update_wrapper(). - This is a convenience function to simplify applying partial() to - update_wrapper(). - """ - return partial(update_wrapper, wrapped=wrapped, - assigned=assigned, updated=updated) - - -################################################################################ -### total_ordering class decorator -################################################################################ - -# The total ordering functions all invoke the root magic method directly -# rather than using the corresponding operator. This avoids possible -# infinite recursion that could occur when the operator dispatch logic -# detects a NotImplemented result and then calls a reflected method. - -def _gt_from_lt(self, other): - 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' - op_result = type(self).__lt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result and self != other - -def _le_from_lt(self, other): - 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' 
- op_result = type(self).__lt__(self, other) - if op_result is NotImplemented: - return op_result - return op_result or self == other - -def _ge_from_lt(self, other): - 'Return a >= b. Computed by @total_ordering from (not a < b).' - op_result = type(self).__lt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -def _ge_from_le(self, other): - 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' - op_result = type(self).__le__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result or self == other - -def _lt_from_le(self, other): - 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' - op_result = type(self).__le__(self, other) - if op_result is NotImplemented: - return op_result - return op_result and self != other - -def _gt_from_le(self, other): - 'Return a > b. Computed by @total_ordering from (not a <= b).' - op_result = type(self).__le__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -def _lt_from_gt(self, other): - 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' - op_result = type(self).__gt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result and self != other - -def _ge_from_gt(self, other): - 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' - op_result = type(self).__gt__(self, other) - if op_result is NotImplemented: - return op_result - return op_result or self == other - -def _le_from_gt(self, other): - 'Return a <= b. Computed by @total_ordering from (not a > b).' - op_result = type(self).__gt__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -def _le_from_ge(self, other): - 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' - op_result = type(self).__ge__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result or self == other - -def _gt_from_ge(self, other): - 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' - op_result = type(self).__ge__(self, other) - if op_result is NotImplemented: - return op_result - return op_result and self != other - -def _lt_from_ge(self, other): - 'Return a < b. Computed by @total_ordering from (not a >= b).' - op_result = type(self).__ge__(self, other) - if op_result is NotImplemented: - return op_result - return not op_result - -_convert = { - '__lt__': [('__gt__', _gt_from_lt), - ('__le__', _le_from_lt), - ('__ge__', _ge_from_lt)], - '__le__': [('__ge__', _ge_from_le), - ('__lt__', _lt_from_le), - ('__gt__', _gt_from_le)], - '__gt__': [('__lt__', _lt_from_gt), - ('__ge__', _ge_from_gt), - ('__le__', _le_from_gt)], - '__ge__': [('__le__', _le_from_ge), - ('__gt__', _gt_from_ge), - ('__lt__', _lt_from_ge)] -} - -def total_ordering(cls): - """Class decorator that fills in missing ordering methods""" - # Find user-defined comparisons (not those inherited from object). 
- roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)} - if not roots: - raise ValueError('must define at least one ordering operation: < > <= >=') - root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ - for opname, opfunc in _convert[root]: - if opname not in roots: - opfunc.__name__ = opname - setattr(cls, opname, opfunc) - return cls - - -################################################################################ -### cmp_to_key() function converter -################################################################################ - -def cmp_to_key(mycmp): - """Convert a cmp= function into a key= function""" - class K(object): - __slots__ = ['obj'] - def __init__(self, obj): - self.obj = obj - def __lt__(self, other): - return mycmp(self.obj, other.obj) < 0 - def __gt__(self, other): - return mycmp(self.obj, other.obj) > 0 - def __eq__(self, other): - return mycmp(self.obj, other.obj) == 0 - def __le__(self, other): - return mycmp(self.obj, other.obj) <= 0 - def __ge__(self, other): - return mycmp(self.obj, other.obj) >= 0 - __hash__ = None - return K - -try: - from _functools import cmp_to_key -except ImportError: - pass - - -################################################################################ -### reduce() sequence to a single item -################################################################################ - -_initial_missing = object() - -def reduce(function, sequence, initial=_initial_missing): - """ - reduce(function, iterable[, initial]) -> value - - Apply a function of two arguments cumulatively to the items of a sequence - or iterable, from left to right, so as to reduce the iterable to a single - value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates - ((((1+2)+3)+4)+5). If initial is present, it is placed before the items - of the iterable in the calculation, and serves as a default when the - iterable is empty. - """ - - it = iter(sequence) - - if initial is _initial_missing: - try: - value = next(it) - except StopIteration: - raise TypeError( - "reduce() of empty iterable with no initial value") from None - else: - value = initial - - for element in it: - value = function(value, element) - - return value - -try: - from _functools import reduce -except ImportError: - pass - - -################################################################################ -### partial() argument application -################################################################################ - -# Purely functional, no descriptor behaviour -class partial: - """New function with partial application of the given arguments - and keywords. 
- """ - - __slots__ = "func", "args", "keywords", "__dict__", "__weakref__" - - def __new__(cls, func, /, *args, **keywords): - if not callable(func): - raise TypeError("the first argument must be callable") - - if hasattr(func, "func"): - args = func.args + args - keywords = {**func.keywords, **keywords} - func = func.func - - self = super(partial, cls).__new__(cls) - - self.func = func - self.args = args - self.keywords = keywords - return self - - def __call__(self, /, *args, **keywords): - keywords = {**self.keywords, **keywords} - return self.func(*self.args, *args, **keywords) - - @recursive_repr() - def __repr__(self): - qualname = type(self).__qualname__ - args = [repr(self.func)] - args.extend(repr(x) for x in self.args) - args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items()) - if type(self).__module__ == "functools": - return f"functools.{qualname}({', '.join(args)})" - return f"{qualname}({', '.join(args)})" - - def __reduce__(self): - return type(self), (self.func,), (self.func, self.args, - self.keywords or None, self.__dict__ or None) - - def __setstate__(self, state): - if not isinstance(state, tuple): - raise TypeError("argument to __setstate__ must be a tuple") - if len(state) != 4: - raise TypeError(f"expected 4 items in state, got {len(state)}") - func, args, kwds, namespace = state - if (not callable(func) or not isinstance(args, tuple) or - (kwds is not None and not isinstance(kwds, dict)) or - (namespace is not None and not isinstance(namespace, dict))): - raise TypeError("invalid partial state") - - args = tuple(args) # just in case it's a subclass - if kwds is None: - kwds = {} - elif type(kwds) is not dict: # XXX does it need to be *exactly* dict? - kwds = dict(kwds) - if namespace is None: - namespace = {} - - self.__dict__ = namespace - self.func = func - self.args = args - self.keywords = kwds - -try: - from _functools import partial -except ImportError: - pass - -# Descriptor version -class partialmethod(object): - """Method descriptor with partial application of the given arguments - and keywords. - - Supports wrapping existing descriptors and handles non-descriptor - callables as instance methods. 
- """ - - def __init__(self, func, /, *args, **keywords): - if not callable(func) and not hasattr(func, "__get__"): - raise TypeError("{!r} is not callable or a descriptor" - .format(func)) - - # func could be a descriptor like classmethod which isn't callable, - # so we can't inherit from partial (it verifies func is callable) - if isinstance(func, partialmethod): - # flattening is mandatory in order to place cls/self before all - # other arguments - # it's also more efficient since only one function will be called - self.func = func.func - self.args = func.args + args - self.keywords = {**func.keywords, **keywords} - else: - self.func = func - self.args = args - self.keywords = keywords - - def __repr__(self): - args = ", ".join(map(repr, self.args)) - keywords = ", ".join("{}={!r}".format(k, v) - for k, v in self.keywords.items()) - format_string = "{module}.{cls}({func}, {args}, {keywords})" - return format_string.format(module=self.__class__.__module__, - cls=self.__class__.__qualname__, - func=self.func, - args=args, - keywords=keywords) - - def _make_unbound_method(self): - def _method(cls_or_self, /, *args, **keywords): - keywords = {**self.keywords, **keywords} - return self.func(cls_or_self, *self.args, *args, **keywords) - _method.__isabstractmethod__ = self.__isabstractmethod__ - _method._partialmethod = self - return _method - - def __get__(self, obj, cls=None): - get = getattr(self.func, "__get__", None) - result = None - if get is not None: - new_func = get(obj, cls) - if new_func is not self.func: - # Assume __get__ returning something new indicates the - # creation of an appropriate callable - result = partial(new_func, *self.args, **self.keywords) - try: - result.__self__ = new_func.__self__ - except AttributeError: - pass - if result is None: - # If the underlying descriptor didn't do anything, treat this - # like an instance method - result = self._make_unbound_method().__get__(obj, cls) - return result - - @property - def __isabstractmethod__(self): - return getattr(self.func, "__isabstractmethod__", False) - - __class_getitem__ = classmethod(GenericAlias) - - -# Helper functions - -def _unwrap_partial(func): - while isinstance(func, partial): - func = func.func - return func - -################################################################################ -### LRU Cache function decorator -################################################################################ - -_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) - -class _HashedSeq(list): - """ This class guarantees that hash() will be called no more than once - per element. This is important because the lru_cache() will hash - the key multiple times on a cache miss. - - """ - - __slots__ = 'hashvalue' - - def __init__(self, tup, hash=hash): - self[:] = tup - self.hashvalue = hash(tup) - - def __hash__(self): - return self.hashvalue - -def _make_key(args, kwds, typed, - kwd_mark = (object(),), - fasttypes = {int, str}, - tuple=tuple, type=type, len=len): - """Make a cache key from optionally typed positional and keyword arguments - - The key is constructed in a way that is flat as possible rather than - as a nested structure that would take more memory. - - If there is only a single argument and its data type is known to cache - its hash value, then that argument is returned without a wrapper. This - saves space and improves lookup speed. - - """ - # All of code below relies on kwds preserving the order input by the user. - # Formerly, we sorted() the kwds before looping. 
The new way is *much* - # faster; however, it means that f(x=1, y=2) will now be treated as a - # distinct call from f(y=2, x=1) which will be cached separately. - key = args - if kwds: - key += kwd_mark - for item in kwds.items(): - key += item - if typed: - key += tuple(type(v) for v in args) - if kwds: - key += tuple(type(v) for v in kwds.values()) - elif len(key) == 1 and type(key[0]) in fasttypes: - return key[0] - return _HashedSeq(key) - -def lru_cache(maxsize=128, typed=False): - """Least-recently-used cache decorator. - - If *maxsize* is set to None, the LRU features are disabled and the cache - can grow without bound. - - If *typed* is True, arguments of different types will be cached separately. - For example, f(3.0) and f(3) will be treated as distinct calls with - distinct results. - - Arguments to the cached function must be hashable. - - View the cache statistics named tuple (hits, misses, maxsize, currsize) - with f.cache_info(). Clear the cache and statistics with f.cache_clear(). - Access the underlying function with f.__wrapped__. - - See: https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) - - """ - - # Users should only access the lru_cache through its public API: - # cache_info, cache_clear, and f.__wrapped__ - # The internals of the lru_cache are encapsulated for thread safety and - # to allow the implementation to change (including a possible C version). - - if isinstance(maxsize, int): - # Negative maxsize is treated as 0 - if maxsize < 0: - maxsize = 0 - elif callable(maxsize) and isinstance(typed, bool): - # The user_function was passed in directly via the maxsize argument - user_function, maxsize = maxsize, 128 - wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) - wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} - return update_wrapper(wrapper, user_function) - elif maxsize is not None: - raise TypeError( - 'Expected first argument to be an integer, a callable, or None') - - def decorating_function(user_function): - wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) - wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed} - return update_wrapper(wrapper, user_function) - - return decorating_function - -def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): - # Constants shared by all lru cache instances: - sentinel = object() # unique object used to signal cache misses - make_key = _make_key # build a key from the function arguments - PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields - - cache = {} - hits = misses = 0 - full = False - cache_get = cache.get # bound method to lookup a key or return None - cache_len = cache.__len__ # get cache size without calling len() - lock = RLock() # because linkedlist updates aren't threadsafe - root = [] # root of the circular doubly linked list - root[:] = [root, root, None, None] # initialize by pointing to self - - if maxsize == 0: - - def wrapper(*args, **kwds): - # No caching -- just a statistics update - nonlocal misses - misses += 1 - result = user_function(*args, **kwds) - return result - - elif maxsize is None: - - def wrapper(*args, **kwds): - # Simple caching without ordering or size limit - nonlocal hits, misses - key = make_key(args, kwds, typed) - result = cache_get(key, sentinel) - if result is not sentinel: - hits += 1 - return result - misses += 1 - result = user_function(*args, **kwds) - cache[key] = result - return result - - else: - - def wrapper(*args, **kwds): - 
# Size limited caching that tracks accesses by recency - nonlocal root, hits, misses, full - key = make_key(args, kwds, typed) - with lock: - link = cache_get(key) - if link is not None: - # Move the link to the front of the circular queue - link_prev, link_next, _key, result = link - link_prev[NEXT] = link_next - link_next[PREV] = link_prev - last = root[PREV] - last[NEXT] = root[PREV] = link - link[PREV] = last - link[NEXT] = root - hits += 1 - return result - misses += 1 - result = user_function(*args, **kwds) - with lock: - if key in cache: - # Getting here means that this same key was added to the - # cache while the lock was released. Since the link - # update is already done, we need only return the - # computed result and update the count of misses. - pass - elif full: - # Use the old root to store the new key and result. - oldroot = root - oldroot[KEY] = key - oldroot[RESULT] = result - # Empty the oldest link and make it the new root. - # Keep a reference to the old key and old result to - # prevent their ref counts from going to zero during the - # update. That will prevent potentially arbitrary object - # clean-up code (i.e. __del__) from running while we're - # still adjusting the links. - root = oldroot[NEXT] - oldkey = root[KEY] - oldresult = root[RESULT] - root[KEY] = root[RESULT] = None - # Now update the cache dictionary. - del cache[oldkey] - # Save the potentially reentrant cache[key] assignment - # for last, after the root and links have been put in - # a consistent state. - cache[key] = oldroot - else: - # Put result in a new link at the front of the queue. - last = root[PREV] - link = [last, root, key, result] - last[NEXT] = root[PREV] = cache[key] = link - # Use the cache_len bound method instead of the len() function - # which could potentially be wrapped in an lru_cache itself. - full = (cache_len() >= maxsize) - return result - - def cache_info(): - """Report cache statistics""" - with lock: - return _CacheInfo(hits, misses, maxsize, cache_len()) - - def cache_clear(): - """Clear the cache and cache statistics""" - nonlocal hits, misses, full - with lock: - cache.clear() - root[:] = [root, root, None, None] - hits = misses = 0 - full = False - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return wrapper - -try: - from _functools import _lru_cache_wrapper -except ImportError: - pass - - -################################################################################ -### cache -- simplified access to the infinity cache -################################################################################ - -def cache(user_function, /): - 'Simple lightweight unbounded cache. Sometimes called "memoize".' - return lru_cache(maxsize=None)(user_function) - - -################################################################################ -### singledispatch() - single-dispatch generic function decorator -################################################################################ - -def _c3_merge(sequences): - """Merges MROs in *sequences* to a single MRO using the C3 algorithm. - - Adapted from https://www.python.org/download/releases/2.3/mro/. 
- - """ - result = [] - while True: - sequences = [s for s in sequences if s] # purge empty sequences - if not sequences: - return result - for s1 in sequences: # find merge candidates among seq heads - candidate = s1[0] - for s2 in sequences: - if candidate in s2[1:]: - candidate = None - break # reject the current head, it appears later - else: - break - if candidate is None: - raise RuntimeError("Inconsistent hierarchy") - result.append(candidate) - # remove the chosen candidate - for seq in sequences: - if seq[0] == candidate: - del seq[0] - -def _c3_mro(cls, abcs=None): - """Computes the method resolution order using extended C3 linearization. - - If no *abcs* are given, the algorithm works exactly like the built-in C3 - linearization used for method resolution. - - If given, *abcs* is a list of abstract base classes that should be inserted - into the resulting MRO. Unrelated ABCs are ignored and don't end up in the - result. The algorithm inserts ABCs where their functionality is introduced, - i.e. issubclass(cls, abc) returns True for the class itself but returns - False for all its direct base classes. Implicit ABCs for a given class - (either registered or inferred from the presence of a special method like - __len__) are inserted directly after the last ABC explicitly listed in the - MRO of said class. If two implicit ABCs end up next to each other in the - resulting MRO, their ordering depends on the order of types in *abcs*. - - """ - for i, base in enumerate(reversed(cls.__bases__)): - if hasattr(base, '__abstractmethods__'): - boundary = len(cls.__bases__) - i - break # Bases up to the last explicit ABC are considered first. - else: - boundary = 0 - abcs = list(abcs) if abcs else [] - explicit_bases = list(cls.__bases__[:boundary]) - abstract_bases = [] - other_bases = list(cls.__bases__[boundary:]) - for base in abcs: - if issubclass(cls, base) and not any( - issubclass(b, base) for b in cls.__bases__ - ): - # If *cls* is the class that introduces behaviour described by - # an ABC *base*, insert said ABC to its MRO. - abstract_bases.append(base) - for base in abstract_bases: - abcs.remove(base) - explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] - abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] - other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] - return _c3_merge( - [[cls]] + - explicit_c3_mros + abstract_c3_mros + other_c3_mros + - [explicit_bases] + [abstract_bases] + [other_bases] - ) - -def _compose_mro(cls, types): - """Calculates the method resolution order for a given class *cls*. - - Includes relevant abstract base classes (with their respective bases) from - the *types* iterable. Uses a modified C3 linearization algorithm. - - """ - bases = set(cls.__mro__) - # Remove entries which are already present in the __mro__ or unrelated. - def is_related(typ): - return (typ not in bases and hasattr(typ, '__mro__') - and not isinstance(typ, GenericAlias) - and issubclass(cls, typ)) - types = [n for n in types if is_related(n)] - # Remove entries which are strict bases of other entries (they will end up - # in the MRO anyway. - def is_strict_base(typ): - for other in types: - if typ != other and typ in other.__mro__: - return True - return False - types = [n for n in types if not is_strict_base(n)] - # Subclasses of the ABCs in *types* which are also implemented by - # *cls* can be used to stabilize ABC ordering. 
- type_set = set(types) - mro = [] - for typ in types: - found = [] - for sub in typ.__subclasses__(): - if sub not in bases and issubclass(cls, sub): - found.append([s for s in sub.__mro__ if s in type_set]) - if not found: - mro.append(typ) - continue - # Favor subclasses with the biggest number of useful bases - found.sort(key=len, reverse=True) - for sub in found: - for subcls in sub: - if subcls not in mro: - mro.append(subcls) - return _c3_mro(cls, abcs=mro) - -def _find_impl(cls, registry): - """Returns the best matching implementation from *registry* for type *cls*. - - Where there is no registered implementation for a specific type, its method - resolution order is used to find a more generic implementation. - - Note: if *registry* does not contain an implementation for the base - *object* type, this function may return None. - - """ - mro = _compose_mro(cls, registry.keys()) - match = None - for t in mro: - if match is not None: - # If *match* is an implicit ABC but there is another unrelated, - # equally matching implicit ABC, refuse the temptation to guess. - if (t in registry and t not in cls.__mro__ - and match not in cls.__mro__ - and not issubclass(match, t)): - raise RuntimeError("Ambiguous dispatch: {} or {}".format( - match, t)) - break - if t in registry: - match = t - return registry.get(match) - -def singledispatch(func): - """Single-dispatch generic function decorator. - - Transforms a function into a generic function, which can have different - behaviours depending upon the type of its first argument. The decorated - function acts as the default implementation, and additional - implementations can be registered using the register() attribute of the - generic function. - """ - # There are many programs that use functools without singledispatch, so we - # trade-off making singledispatch marginally slower for the benefit of - # making start-up of such applications slightly faster. - import types, weakref - - registry = {} - dispatch_cache = weakref.WeakKeyDictionary() - cache_token = None - - def dispatch(cls): - """generic_func.dispatch(cls) -> <impl> - - Runs the dispatch algorithm to return the best available implementation - for the given *cls* registered on *generic_func*. - - """ - nonlocal cache_token - if cache_token is not None: - current_token = get_cache_token() - if cache_token != current_token: - dispatch_cache.clear() - cache_token = current_token - try: - impl = dispatch_cache[cls] - except KeyError: - try: - impl = registry[cls] - except KeyError: - impl = _find_impl(cls, registry) - dispatch_cache[cls] = impl - return impl - - def _is_union_type(cls): - from typing import get_origin, Union - return get_origin(cls) in {Union, types.UnionType} - - def _is_valid_dispatch_type(cls): - if isinstance(cls, type): - return True - from typing import get_args - return (_is_union_type(cls) and - all(isinstance(arg, type) for arg in get_args(cls))) - - def register(cls, func=None): - """generic_func.register(cls, func) -> func - - Registers a new implementation for the given *cls* on a *generic_func*. - - """ - nonlocal cache_token - if _is_valid_dispatch_type(cls): - if func is None: - return lambda f: register(cls, f) - else: - if func is not None: - raise TypeError( - f"Invalid first argument to `register()`. " - f"{cls!r} is not a class or union type." - ) - ann = getattr(cls, '__annotations__', {}) - if not ann: - raise TypeError( - f"Invalid first argument to `register()`: {cls!r}. 
" - f"Use either `@register(some_class)` or plain `@register` " - f"on an annotated function." - ) - func = cls - - # only import typing if annotation parsing is necessary - from typing import get_type_hints - argname, cls = next(iter(get_type_hints(func).items())) - if not _is_valid_dispatch_type(cls): - if _is_union_type(cls): - raise TypeError( - f"Invalid annotation for {argname!r}. " - f"{cls!r} not all arguments are classes." - ) - else: - raise TypeError( - f"Invalid annotation for {argname!r}. " - f"{cls!r} is not a class." - ) - - if _is_union_type(cls): - from typing import get_args - - for arg in get_args(cls): - registry[arg] = func - else: - registry[cls] = func - if cache_token is None and hasattr(cls, '__abstractmethods__'): - cache_token = get_cache_token() - dispatch_cache.clear() - return func - - def wrapper(*args, **kw): - if not args: - raise TypeError(f'{funcname} requires at least ' - '1 positional argument') - - return dispatch(args[0].__class__)(*args, **kw) - - funcname = getattr(func, '__name__', 'singledispatch function') - registry[object] = func - wrapper.register = register - wrapper.dispatch = dispatch - wrapper.registry = types.MappingProxyType(registry) - wrapper._clear_cache = dispatch_cache.clear - update_wrapper(wrapper, func) - return wrapper - - -# Descriptor version -class singledispatchmethod: - """Single-dispatch generic method descriptor. - - Supports wrapping existing descriptors and handles non-descriptor - callables as instance methods. - """ - - def __init__(self, func): - if not callable(func) and not hasattr(func, "__get__"): - raise TypeError(f"{func!r} is not callable or a descriptor") - - self.dispatcher = singledispatch(func) - self.func = func - - def register(self, cls, method=None): - """generic_method.register(cls, func) -> func - - Registers a new implementation for the given *cls* on a *generic_method*. - """ - return self.dispatcher.register(cls, func=method) - - def __get__(self, obj, cls=None): - def _method(*args, **kwargs): - method = self.dispatcher.dispatch(args[0].__class__) - return method.__get__(obj, cls)(*args, **kwargs) - - _method.__isabstractmethod__ = self.__isabstractmethod__ - _method.register = self.register - update_wrapper(_method, self.func) - return _method - - @property - def __isabstractmethod__(self): - return getattr(self.func, '__isabstractmethod__', False) - - -################################################################################ -### cached_property() - property result cached as instance attribute -################################################################################ - -_NOT_FOUND = object() - -class cached_property: - def __init__(self, func): - self.func = func - self.attrname = None - self.__doc__ = func.__doc__ - - def __set_name__(self, owner, name): - if self.attrname is None: - self.attrname = name - elif name != self.attrname: - raise TypeError( - "Cannot assign the same cached_property to two different names " - f"({self.attrname!r} and {name!r})." - ) - - def __get__(self, instance, owner=None): - if instance is None: - return self - if self.attrname is None: - raise TypeError( - "Cannot use cached_property instance without calling __set_name__ on it.") - try: - cache = instance.__dict__ - except AttributeError: # not all objects have __dict__ (e.g. class defines slots) - msg = ( - f"No '__dict__' attribute on {type(instance).__name__!r} " - f"instance to cache {self.attrname!r} property." 
- ) - raise TypeError(msg) from None - val = cache.get(self.attrname, _NOT_FOUND) - if val is _NOT_FOUND: - val = self.func(instance) - try: - cache[self.attrname] = val - except TypeError: - msg = ( - f"The '__dict__' attribute on {type(instance).__name__!r} instance " - f"does not support item assignment for caching {self.attrname!r} property." - ) - raise TypeError(msg) from None - return val - - __class_getitem__ = classmethod(GenericAlias) diff --git a/controllers/guidance_ctrl/Lib/genericpath.py b/controllers/guidance_ctrl/Lib/genericpath.py deleted file mode 100644 index 309759af..00000000 --- a/controllers/guidance_ctrl/Lib/genericpath.py +++ /dev/null @@ -1,158 +0,0 @@ -""" -Path operations common to more than one OS -Do not use directly. The OS specific modules import the appropriate -functions from this module themselves. -""" -try: - import os -except ImportError: - import _dummy_os as os -import stat - -__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', - 'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile', - 'samestat'] - - -# Does a path exist? -# This is false for dangling symbolic links on systems that support them. -def exists(path): - """Test whether a path exists. Returns False for broken symbolic links""" - try: - os.stat(path) - except (OSError, ValueError): - return False - return True - - -# This follows symbolic links, so both islink() and isdir() can be true -# for the same path on systems that support symlinks -def isfile(path): - """Test whether a path is a regular file""" - try: - st = os.stat(path) - except (OSError, ValueError): - return False - return stat.S_ISREG(st.st_mode) - - -# Is a path a directory? -# This follows symbolic links, so both islink() and isdir() -# can be true for the same path on systems that support symlinks -def isdir(s): - """Return true if the pathname refers to an existing directory.""" - try: - st = os.stat(s) - except (OSError, ValueError): - return False - return stat.S_ISDIR(st.st_mode) - - -def getsize(filename): - """Return the size of a file, reported by os.stat().""" - return os.stat(filename).st_size - - -def getmtime(filename): - """Return the last modification time of a file, reported by os.stat().""" - return os.stat(filename).st_mtime - - -def getatime(filename): - """Return the last access time of a file, reported by os.stat().""" - return os.stat(filename).st_atime - - -def getctime(filename): - """Return the metadata change time of a file, reported by os.stat().""" - return os.stat(filename).st_ctime - - -# Return the longest prefix of all list elements. -def commonprefix(m): - "Given a list of pathnames, returns the longest common leading component" - if not m: return '' - # Some people pass in a list of pathname parts to operate in an OS-agnostic - # fashion; don't try to translate in that case as that's an abuse of the - # API and they are already doing what they need to be OS-agnostic and so - # they most likely won't be using an os.PathLike object in the sublists. - if not isinstance(m[0], (list, tuple)): - m = tuple(map(os.fspath, m)) - s1 = min(m) - s2 = max(m) - for i, c in enumerate(s1): - if c != s2[i]: - return s1[:i] - return s1 - -# Are two stat buffers (obtained from stat, fstat or lstat) -# describing the same file? -def samestat(s1, s2): - """Test whether two stat buffers reference the same file""" - return (s1.st_ino == s2.st_ino and - s1.st_dev == s2.st_dev) - - -# Are two filenames really pointing to the same file? 
-def samefile(f1, f2): - """Test whether two pathnames reference the same actual file or directory - - This is determined by the device number and i-node number and - raises an exception if an os.stat() call on either pathname fails. - """ - s1 = os.stat(f1) - s2 = os.stat(f2) - return samestat(s1, s2) - - -# Are two open files really referencing the same file? -# (Not necessarily the same file descriptor!) -def sameopenfile(fp1, fp2): - """Test whether two open file objects reference the same file""" - s1 = os.fstat(fp1) - s2 = os.fstat(fp2) - return samestat(s1, s2) - - -# Split a path in root and extension. -# The extension is everything starting at the last dot in the last -# pathname component; the root is everything before that. -# It is always true that root + ext == p. - -# Generic implementation of splitext, to be parametrized with -# the separators -def _splitext(p, sep, altsep, extsep): - """Split the extension from a pathname. - - Extension is everything from the last dot to the end, ignoring - leading dots. Returns "(root, ext)"; ext may be empty.""" - # NOTE: This code must work for text and bytes strings. - - sepIndex = p.rfind(sep) - if altsep: - altsepIndex = p.rfind(altsep) - sepIndex = max(sepIndex, altsepIndex) - - dotIndex = p.rfind(extsep) - if dotIndex > sepIndex: - # skip all leading dots - filenameIndex = sepIndex + 1 - while filenameIndex < dotIndex: - if p[filenameIndex:filenameIndex+1] != extsep: - return p[:dotIndex], p[dotIndex:] - filenameIndex += 1 - - return p, p[:0] - -def _check_arg_types(funcname, *args): - hasstr = hasbytes = False - for s in args: - if isinstance(s, str): - hasstr = True - elif isinstance(s, bytes): - hasbytes = True - else: - raise TypeError(f'{funcname}() argument must be str, bytes, or ' - f'os.PathLike object, not {s.__class__.__name__!r}') from None - if hasstr and hasbytes: - raise TypeError("Can't mix strings and bytes in path components") from None diff --git a/controllers/guidance_ctrl/Lib/guidance/LICENSE.md b/controllers/guidance_ctrl/Lib/guidance/LICENSE.md deleted file mode 100644 index 32c0918f..00000000 --- a/controllers/guidance_ctrl/Lib/guidance/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) The Guidance Contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
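For orientation, the `_splitext` helper deleted above implements the usual splitext contract (reached publicly through `os.path.splitext`): the extension begins at the last dot of the final path component, leading dots are skipped, and root + ext always reconstructs the input. A minimal sketch of that contract, using only the public API:

import os.path

# Extension starts at the last dot of the last component.
assert os.path.splitext("archive.tar.gz") == ("archive.tar", ".gz")
# Leading dots are skipped, so dotfiles have no extension.
assert os.path.splitext(".bashrc") == (".bashrc", "")
# A dot in a directory name is never an extension separator.
assert os.path.splitext("pkg.d/readme") == ("pkg.d/readme", "")
# Invariant noted in the deleted comments: root + ext == p.
p = "a/b.c/d.e"
root, ext = os.path.splitext(p)
assert root + ext == p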
diff --git a/controllers/guidance_ctrl/Lib/guidance/__init__.py b/controllers/guidance_ctrl/Lib/guidance/__init__.py deleted file mode 100644 index 34eab4ea..00000000 --- a/controllers/guidance_ctrl/Lib/guidance/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -__version__ = "0.1.11" - -import functools -import sys -import types -import inspect - -from . import models -from ._grammar import (Placeholder, RawFunction, GrammarFunction, - Terminal, replace_grammar_node, string) -from ._utils import strip_multiline_string_indents -from ._server import Server - -newline = "\n" - -# This makes the guidance module callable -class Guidance(types.ModuleType): - def __call__(self, f=None, *, stateless=False, cache=None, dedent=True, model=models.Model): - return _decorator(f, stateless=stateless, cache=cache, dedent=dedent, model=model) -sys.modules[__name__].__class__ = Guidance - -_null_grammar = string('') - -def _decorator(f, *, stateless, cache, dedent, model): - - # if we are not yet being used as a decorator, then save the args - if f is None: - return functools.partial(_decorator, stateless=stateless, cache=cache, dedent=dedent, model=model) - - # if we are being used as a decorator then return the decorated function - else: - - # this strips out indentation in multiline strings that aligns with the current python indentation - if dedent is True or dedent == 'python': - f = strip_multiline_string_indents(f) - - # we cache if requested - if cache: - f = functools.cache(f) - - @functools.wraps(f) - def wrapped(*args, **kwargs): - - # make a stateless grammar if we can - if stateless is True or (callable(stateless) and stateless(*args, **kwargs)): - - # if we have a placeholder set then we must be in a recursive definition and so we return the placeholder - placeholder = getattr(f, "_self_call_placeholder_", None) - if placeholder is not None: - return placeholder - - # otherwise we call the function to generate the grammar - else: - - # set a placeholder for recursive calls (only if we don't have arguments that might make caching a bad idea) - no_args = len(args) + len(kwargs) == 0 - if no_args: - f._self_call_placeholder_ = Placeholder() - - # call the function to get the grammar node - node = f(_null_grammar, *args, **kwargs) - if not isinstance(node, (Terminal, str)): - node.name = f.__name__ - - # replace all the placeholders with our generated node - if no_args: - replace_grammar_node(node, f._self_call_placeholder_, node) - del f._self_call_placeholder_ - - return node - - # otherwise must be stateful (which means we can't be inside a select() call) - else: - return RawFunction(f, args, kwargs) - - # Remove the first argument from the wrapped function - signature = inspect.signature(f) - params = list(signature.parameters.values()) - params.pop(0) - wrapped.__signature__ = signature.replace(parameters=params) - - # attach this as a method of the model class (if given) - # if model is not None: - # setattr(model, f.__name__, f) - - return wrapped - -# we expose all the library functions at the top level of the module -from .library import * diff --git a/controllers/guidance_ctrl/Lib/guidance/_grammar.py b/controllers/guidance_ctrl/Lib/guidance/_grammar.py deleted file mode 100644 index dc12b9e4..00000000 --- a/controllers/guidance_ctrl/Lib/guidance/_grammar.py +++ /dev/null @@ -1,859 +0,0 @@ -import base64 -import uuid -import json -import inspect -import types -import re - -from typing import List, TypeVar, Union - -from . import _serialization_pb2 -from . 
import _parser - -_T = TypeVar("_T") - -# to support the embedding of guidance functions inside Python f-strings we use tags with these delimiters -tag_start = "{{G|" # start of a call tag -tag_end = "|G}}" # end of a call tag -_call_pool = {} # the functions associated with the call tags -_tag_pattern = re.compile(re.escape(tag_start) + r"([^\|]+)" + re.escape(tag_end)) # the pattern for matching call tags - -class StatefulException(Exception): - '''This is raised when we try to use the state of a grammar object like it was a live model. - - Note that eventually it would be nice to support stateful parser/grammar constructs directly, but - such "parser combinators" cannot be run efficiently in Python. So we use a traditional parser and - grammar separation (hence the need for this exception).''' - pass - -class Function(): - ''' This is the abstract class representing all guidance functions. - - There are two main subclasses: GrammarFunction and RawFunction. GrammarFunctions - represent guidance grammars that can be serialized and sent across the wire, while - RawFunctions represent unconstrained native Python functions. - ''' - - def __init__(self, name, value=None) -> None: - self.name = name - self.value = value - - def __str__(self): - '''Creates a string tag that can be used to retrieve this object.''' - - # save the call in our call pool, ready to be run when it is attached to an LM object - str_id = str(id(self)) - if str_id not in _call_pool: - _call_pool[str_id] = self - - # return a string representation of this call so it can be combined with other strings/calls - return tag_start + str_id + tag_end - - def serialize(self): - raise NotImplementedError() - - @classmethod - def deserialize(cls, serialized_grammar): - raise NotImplementedError() - - -class RawFunction(Function): - __slots__ = ("f", "args", "kwargs") - - def __init__(self, f, args, kwargs): - self.f = f - self.args = args - self.kwargs = kwargs - - def __call__(self, model): - return self.f(model, *self.args, **self.kwargs) - - def __add__(self, other): - - # if we are joining with a string we use the string representation for ourselves - if isinstance(other, str): - return str(self) + other - - def __add__(model): - model = self(model) - if model is None: - raise Exception(f"The guidance function `{self.f.__name__}` did not return a model object! 
You need to return an updated model object at the end of your guidance function.") - if isinstance(other, GrammarFunction): - return model + other - else: - return other(model) - return RawFunction(__add__, [], {}) - - def __radd__(self, other): - - # if we are joining with a string we use the string representation for ourselves - if isinstance(other, str): - return other + str(self) - - def __radd__(model): - if isinstance(other, GrammarFunction): - model += other - else: - model = other(model) - return self(model) - return RawFunction(__radd__, [], {}) - -class Match: - def __init__(self, captures, log_probs, partial): - self.captures = captures - self.log_probs = log_probs - self.partial = partial - - def __getitem__(self, key): - return self.captures[key] - - def __len__(self): - return len(self.captures) - - def __bool__(self): - return True - - def __str__(self): - return str(self.captures) - - def __repr__(self): - return "" - -class GrammarFunction(Function): - num_used_names = 0 - - def __add__(self, value): - - # see if we have a string with calls or a simple string - if isinstance(value, str) or isinstance(value, bytes): - if isinstance(value, str) and re.search(_tag_pattern, value): - return str(self) + value - else: - value = string(value) - - # see if we can keep building a stateless grammar - if isinstance(value, GrammarFunction): - return Join([self, value]) - - # otherwise we let the stateful object handle things - else: - return value.__radd__(self) - - def __radd__(self, value): - - # see if we have a string with calls or a simple string - if isinstance(value, str) or isinstance(value, bytes): - if isinstance(value, str) and re.search(_tag_pattern, value): - return value + str(self) - else: - value = string(value) - - # see if we can keep building a stateless grammar - if isinstance(value, GrammarFunction): - return Join([value, self]) - - # otherwise we let the stateful object handle things - else: - return value.__add__(self) - - def __getitem__(self, value): - raise StatefulException("GrammarFunctions can't access state!") - - def match(self, byte_string: Union[str, bytes], allow_partial: bool=False, raise_exceptions: bool=False) -> Union[Match, None]: - if isinstance(byte_string, str): - byte_string = byte_string.encode() - parser = _parser.EarleyCommitParser(self) - - for i in range(len(byte_string)): - try: - parser.consume_byte(byte_string[i:i+1]) - except _parser.ParserException: - if raise_exceptions: - raise - else: - return None - - if not allow_partial and not parser.matched(): - return None - else: - return Match(*parser.get_captures(), partial=not parser.matched()) - - @staticmethod - def _new_name(): - num_used = GrammarFunction.num_used_names - - a_ord = ord('a') - - # name the name in base 26 letter notation - name = chr(a_ord + num_used % 26) - if num_used >= 26: - name = chr(a_ord + (num_used % 676) // 26) + name - if num_used >= 676: - name = chr(a_ord + (num_used % 17576) // 676) + name - if num_used >= 17576: - name = chr(a_ord + (num_used % 456976) // 17576) + name - - GrammarFunction.num_used_names += 1 - - return name - - def gbnf_string(self): - used_names = set() - names = {} - lines = [] - root_name = self._rec_gbnf_string(lines, used_names, names) - lines.append("root ::= " + root_name) - return "\n".join(lines) - - def serialize(self): - g = _serialization_pb2.Grammar() - index_map = {} - nodes = {} - self._rec_create_index_map(index_map) # gives all the nodes an index - self._rec_serialize(index_map, nodes) # nodes is filled in (as is 
index_map) - g.nodes.extend(list(nodes.values())) - return g.SerializeToString() - - def _rec_create_index_map(self, index_map): - if self not in index_map: - index_map[self] = len(index_map) - if hasattr(self, "values"): - for value in self.values: - value._rec_create_index_map(index_map) - - def _rec_serialize(self, index_map, nodes): - if self not in nodes: - v = self._to_proto(index_map) - node = _serialization_pb2.GrammarFunction() - if isinstance(self, Byte): - node.byte.CopyFrom(v) - elif isinstance(self, ByteRange): - node.byte_range.CopyFrom(v) - elif isinstance(self, Select): - node.select.CopyFrom(v) - elif isinstance(self, Join): - node.join.CopyFrom(v) - elif isinstance(self, ModelVariable): - node.model_variable.CopyFrom(v) - else: - raise Exception("Unknown node type") - nodes[self] = node - if hasattr(self, "values"): - for value in self.values: - value._rec_serialize(index_map, nodes) - - @classmethod - def deserialize(cls, serialized_grammar): - g = _serialization_pb2.Grammar() - g.ParseFromString(serialized_grammar) - - # create the list of objects - values = [] - for node in g.nodes: - if node.HasField("byte"): - node = Byte._from_proto(node.byte) - elif node.HasField("byte_range"): - node = ByteRange._from_proto(node.byte_range) - elif node.HasField("select"): - node = Select._from_proto(node.select) - elif node.HasField("join"): - node = Join._from_proto(node.join) - elif node.HasField("model_variable"): - node = ModelVariable._from_proto(node.model_variable) - else: - raise Exception("Unknown node type") - values.append(node) - - # fill in the values pointers now that we have the full list of objects - for v in values: - if hasattr(v, "values"): - for i, index in enumerate(v.values): - v.values[i] = values[index] - - return values[0] # the first element in the root node of the grammar - -class Terminal(GrammarFunction): - def match_byte(self, byte): - pass # abstract - - @property - def max_tokens(self): - return 1000000000000 - -class Byte(Terminal): - __slots__ = ("byte", "hidden", "commit_point", "capture_name", "temperature") - - def __init__(self, byte): - assert isinstance(byte, bytes) - assert len(byte) == 1 - self.byte = byte - self.hidden = False - self.commit_point = False - self.capture_name = None - self.temperature = -1 - - @property - def name(self): - return str(self.byte) - - def __hash__(self): - return self.byte[0] - - def __eq__(self, other): - return isinstance(other, Byte) and self.byte[0] == other.byte[0] - - def __repr__(self) -> str: - return str(self.byte) - - def __len__(self): - return 1 - - def match_byte(self, byte): - return byte == self.byte - - @property - def nullable(self): - return False - - def _to_proto(self, index_map): - data = _serialization_pb2.Byte() - data.byte = self.byte - data.hidden = self.hidden - data.commit_point = self.commit_point - data.capture_name = "" if self.capture_name is None else self.capture_name - data.temperature = self.temperature - return data - - @staticmethod - def _from_proto(data): - out = Byte(data.byte) - out.hidden = data.hidden - out.commit_point = data.commit_point - out.capture_name = None if data.capture_name == "" else data.capture_name - out.temperature = data.temperature - return out - -class ByteRange(Terminal): - __slots__ = ("byte_range", "hidden", "commit_point", "capture_name", "temperature") - - def __init__(self, byte_range): - assert isinstance(byte_range, bytes) - assert len(byte_range) == 2 - self.byte_range = byte_range - self.hidden = False - self.commit_point = False - 
self.capture_name = None - self.temperature = -1 # -1 means not set - - def match_byte(self, byte): - return self.byte_range[0] <= byte[0] <= self.byte_range[1] - - @property - def name(self): - return str(self.byte_range) - @name.setter - def name(self, value): - pass # we ignore name changes - - @property - def nullable(self): - return False - - def __hash__(self): - return self.byte_range[0] + 256 * self.byte_range[1] - - def __eq__(self, other): - return isinstance(other, ByteRange) and self.byte_range[0] == other.byte_range[0] and self.byte_range[1] == other.byte_range[1] - - def __repr__(self) -> str: - return str(self.byte_range) - - def __len__(self): - return 1 - - def _to_proto(self, index_map): - data = _serialization_pb2.ByteRange() - data.byte_range = self.byte_range - data.hidden = self.hidden - data.commit_point = self.commit_point - data.capture_name = "" if self.capture_name is None else self.capture_name - data.temperature = self.temperature - return data - - @staticmethod - def _from_proto(data): - out = ByteRange(data.byte_range) - out.hidden = data.hidden - out.commit_point = data.commit_point - out.capture_name = None if data.capture_name == "" else data.capture_name - out.temperature = data.temperature - return out - -class Null(): - __slots__ = ("name", "hidden", "commit_point", "capture_name") - - nullable = True - def __init__(self): - self.name = None - self.hidden = False - self.commit_point = False - self.capture_name = None - - def __add__(self, other): - # see if we have a string with calls or a simple string - if isinstance(other, bytes): - return string(other) - elif isinstance(other, str): - return str_to_grammar(other) - - # otherwise we return unchanged - else: - return other - - def __radd__(self, other): - return self.__add__(other) # left vs right makes no difference since we are null - -class ModelVariable(GrammarFunction): - '''This represents a variable that will be read from the model object when this grammar is executed. - - Note that the name is the name of the attribute on the model object this node - will get replaced with. 
- ''' - __slots__ = ("name", "hidden", "commit_point", "capture_name") - - def __init__(self, name): - self.name = name - self.hidden = False - self.commit_point = False - self.capture_name = None - self.nullable = False - - def _to_proto(self, index_map): - data = _serialization_pb2.ModelVariable() - data.hidden = self.hidden - data.name = self.name - data.commit_point = self.commit_point - data.capture_name = "" if self.capture_name is None else self.capture_name - return data - - @staticmethod - def _from_proto(data): - out = ModelVariable(data.name) - out.hidden = data.hidden - out.commit_point = data.commit_point - out.capture_name = None if data.capture_name == "" else data.capture_name - return out - -def replace_grammar_node(grammar, target, replacement): - # Use a stack to keep track of the nodes to be visited - stack = [grammar] - visited_set = set() # use set for O(1) lookups - - while stack: - current = stack.pop() - - # Check if we have already visited this node - if current in visited_set: - continue - visited_set.add(current) - - # We are done with this node if it's a terminal - if isinstance(current, (Terminal, ModelVariable)): - continue - - # Iterate through the node's values and replace target with replacement - for i, value in enumerate(current.values): - if value == target: - current.values[i] = replacement - else: - stack.append(value) - -# def replace_grammar_node(grammar, target, replacement, visited_set={}): - -# # see if we have already visited this node -# if grammar in visited_set: -# return -# else: -# visited_set[grammar] = True - -# # we are done if this is a terminal -# if isinstance(grammar, (Terminal, ModelVariable)): -# return - -# # replace all matching sub-nodes -# for i,value in enumerate(grammar.values): -# if value == target: -# grammar.values[i] = replacement -# else: -# replace_grammar_node(value, target, replacement, visited_set) - -def replace_model_variables(grammar, model, allowed_vars=None): - '''Replace all the ModelVariable nodes with their values in an iterative manner.''' - visited_set = set() - stack = [(grammar, None, None)] # Stack stores tuples of (node, parent_node, child_index) - replacements = [] - - while stack: - current, parent, child_index = stack.pop() - - # This node is being visited for the first time - if current not in visited_set: - visited_set.add(current) - - # If it's a terminal node, skip it - if isinstance(current, Terminal): - continue - - # Process non-terminal nodes in reverse order to maintain the depth-first order - for i in reversed(range(len(current.values))): - value = current.values[i] - if isinstance(value, ModelVariable): - if allowed_vars is not None and value.name not in allowed_vars: - raise Exception(f"Invalid model variable name: {value.name}") - # Replace the ModelVariable with its value from 'model' (or the tokenizer if model does not have it) - # note we skip over attrs we don't have since we may be run twice, once on the model and once for the engine - if hasattr(model, value.name): - obj = model - elif hasattr(model, "tokenizer") and hasattr(model.tokenizer, value.name): - obj = model.tokenizer - else: - obj = None - if obj is not None: - replacement_value = _wrap_as_grammar(getattr(obj, value.name)) - if value.commit_point: - replacement_value = commit_point(replacement_value, hidden=value.hidden) - replacements.append((current, i, value)) # Record the replacement - current.values[i] = replacement_value # Perform the replacement - else: - # If not ModelVariable, push onto the stack to process 
later - stack.append((value, current, i)) - - return replacements - -# def replace_model_variables(grammar, model, visited_set={}): -# '''Replace all the ModelVariable nodes with their values.''' - -# # see if we have already visited this node -# if grammar in visited_set: -# return [] -# else: -# visited_set[grammar] = True - -# # we are done if this is a terminal -# if isinstance(grammar, Terminal): -# return [] - -# # replace all matching sub-nodes -# replacements = [] -# for i,value in enumerate(grammar.values): -# if isinstance(value, ModelVariable): -# g = _wrap_as_grammar(getattr(model, value.name)) -# if value.commit_point: -# g = commit_point(g, hidden=value.hidden) -# replacements.append((grammar, i, value)) -# grammar.values[i] = g -# else: -# replacements.extend(replace_model_variables(value, model, visited_set)) -# return replacements - -def unreplace_model_variables(replacements): - '''This restores a grammar back to its original state, ready for another execution.''' - for grammar,i,orig_value in replacements: - grammar.values[i] = orig_value - -def _wrap_as_grammar(value): - '''This takes whatever value was given and tries to turn it into a guidance grammar.''' - - # if it is already a valid grammar we have no need to wrap it - if isinstance(value, GrammarFunction): - return value - - # a None value becomes the null grammar - if value is None: - return Null() - - # we have a constant value - if isinstance(value, (str, bytes)): - return string(value) - - raise Exception("Can't wrap as a grammar!") - -def commit_point(value, hidden=False): - '''Force the grammar to commit to a parse that includes this node once it can. - - Note that commit point nodes can be optionally hidden (in fact they are the only - nodes that can be hidden since they are by definition not impacted by multiple possible - inconsistent parses.)''' - # TODO: assert that value is not empty since we don't yet support that - if isinstance(value, str): - value = string(value) - if isinstance(value, Terminal): - value = Join([value]) # commit points should be full nodes (otherwise we can't hide them) TODO: decide if we want to do this even for non-hidden commit points - value.commit_point = True - if hidden: - _rec_hide(value) - return value - -def _rec_hide(grammar): - if not grammar.hidden: - grammar.hidden = True - if hasattr(grammar, "values"): - for g in grammar.values: - _rec_hide(g) - -class Placeholder(GrammarFunction): - __slots__ = ("nullable",) - def __init__(self): - self.nullable = False - - -class Join(GrammarFunction): - __slots__ = ("nullable", "values", "name", "hidden", "commit_point", "capture_name", "max_tokens") - - def __init__(self, values, name: Union[str, None]=None, max_tokens=100000000) -> None: - values = [string(v) if isinstance(v, (str, bytes)) else v for v in values] # wrap raw strings - self.nullable = all(getattr(v, "nullable", False) for v in values) - self.values = [v for v in values if not isinstance(v, Null)] - self.name = name if name is not None else GrammarFunction._new_name() - self.hidden = False - self.commit_point = False - self.capture_name = None - self.max_tokens = max_tokens - - def __repr__(self, indent="", done=None): - if done is None: - done = set() - s = self.name.ljust(20) + " <- " + " ".join([v.name for v in self.values]) - s += " " + ("hidden " if self.hidden else "") + ("commit_point " if self.commit_point else "") + (f"capture_name={self.capture_name} " if self.capture_name else "") + (f"max_tokens={self.max_tokens}" if 
self.max_tokens < 100000 else "") +"\n" - done.add(self) - for v in self.values: - if v not in done and (isinstance(v, Join) or isinstance(v, Select)): - s += v.__repr__(indent, done) - return s - - def _to_proto(self, index_map): - data = _serialization_pb2.Join() - data.nullable = self.nullable - for v in self.values: - data.values.append(index_map[v]) - data.name = self.name - data.hidden = self.hidden - data.commit_point = self.commit_point - data.capture_name = "" if self.capture_name is None else self.capture_name - data.max_tokens = self.max_tokens - return data - - @staticmethod - def _from_proto(data): - out = Join( - data.values, # we put ints in that will be replaced later by the deserialize method - name=data.name, - max_tokens=data.max_tokens - ) - out.nullable = data.nullable - out.hidden = data.hidden - out.commit_point = data.commit_point - out.capture_name = None if data.capture_name == "" else data.capture_name - return out - - -class Select(GrammarFunction): - __slots__ = ("nullable", "_values", "name", "hidden", "commit_point", "capture_name", "max_tokens", "recursive") - - def __init__(self, values, capture_name=None, name=None, max_tokens=10000000, recursive=False) -> None: - self.values = values - self.name = name if name is not None else GrammarFunction._new_name() - self.hidden = False - self.commit_point = False - self.capture_name = capture_name - self.max_tokens = max_tokens - self.recursive = recursive - - @property - def values(self): - return self._values - @values.setter - def values(self, vals): - self._values = [string(v) if isinstance(v, (str, bytes)) else v for v in vals] - self.nullable = any(getattr(v, "nullable", False) for v in self._values) - self._values = [v for v in self._values if not isinstance(v, Null)] - - def __repr__(self, indent="", done=None): - if done is None: - done = set() - s = self.name.ljust(20) + " <- " + " | ".join([v.name for v in self.values]) - s += " " + ("hidden " if self.hidden else "") + ("commit_point " if self.commit_point else "") + (f"max_tokens={self.max_tokens}" if self.max_tokens < 100000 else "") +"\n" - done.add(self) - for v in self.values: - if v not in done and (isinstance(v, Join) or isinstance(v, Select)): - s += v.__repr__(indent, done) - return s - - def _to_proto(self, index_map): - data = _serialization_pb2.Select() - data.nullable = self.nullable - for v in self.values: - data.values.append(index_map[v]) - data.name = self.name - data.hidden = self.hidden - data.commit_point = self.commit_point - data.capture_name = "" if self.capture_name is None else self.capture_name - data.max_tokens = self.max_tokens - data.recursive = self.recursive - - return data - - @staticmethod - def _from_proto(data): - out = Select( - data.values, # we put ints in that will be replaced later by the deserialize method - name=data.name, - max_tokens=data.max_tokens - ) - out.nullable = data.nullable - out.hidden = data.hidden - out.commit_point = data.commit_point - out.capture_name = None if data.capture_name == "" else data.capture_name - out.recursive = data.recursive - return out - -def string(value) -> Union[str, bytes, Null, Byte, Join]: - if isinstance(value, str): - b = bytes(value, encoding="utf8") - elif isinstance(value, bytes): - b = value - else: - raise Exception("Must pass bytes or str to the string() function!") - if len(value) == 0: - return Null() - elif len(b) == 1: - return Byte(b) - else: - return Join([Byte(b[i:i+1]) for i in range(len(b))], name=str(b)) - -def select(options: List[_T], name=None, 
list_append=False, recurse=False, skip_checks=False) -> Union[Select, _T]: - # TODO: allow for returning the probabilities of the selected item - # TODO: also the full probabilities distribution over all items. We can implement this using the prob of the selected item by repeating the call, removing the selected item each time - if not skip_checks: - for i, value in enumerate(options): - assert not isinstance(value, RawFunction), "You cannot select between stateful functions in the current guidance implementation!" - assert not isinstance(value, types.FunctionType), "Did you pass a function without calling it to select? You need to pass the results of a called guidance function to select." - if isinstance(value, int) or isinstance(value, float): - options[i] = str(value) - - # set up list append var saving if requested - if list_append: - name = "__LIST_APPEND:" + name - - if recurse: - node = Select([], capture_name=name, recursive=True) - node.values = [node + v for v in options if v != ""] + options - return node - else: - if len(options) == 1 and name is None: - return options[0] - else: - return Select(options, capture_name=name, recursive=False) - -def byte_range(low, high) -> ByteRange: - return ByteRange(low + high) - -# def ignore_placeholders(value): -# if not isinstance(value, Join): # don't double wrap -# value = Join([value]) # this ensures we capture what we want, and not something surprisingly self_recursive -# value.ignore_placeholders = True -# return value - -def capture(value, name): - # if log_probs: - # name += ":__LOG_PROBS" - if not (isinstance(value, Join) and len(value.values) == 1): # don't double wrap - value = Join([value]) # this ensures we capture what we want, and not something surprisingly self_recursive - value.capture_name = name - return value - -def token_limit(value, max_tokens: int): - _rec_token_limit(value, max_tokens) - return value - -def _rec_token_limit(grammar, max_tokens: int): - if grammar.max_tokens > max_tokens and not isinstance(grammar, Terminal): - if getattr(grammar, "recursive", False): # only restrict recursive selects, otherwise we would block all ways to complete the grammar - grammar.max_tokens = max_tokens - for value in getattr(grammar, "values", []): # also restrict the child nodes of recursive selects - if not isinstance(value, Terminal): - value.max_tokens = max_tokens - if hasattr(grammar, "values"): - for g in grammar.values: - _rec_token_limit(g, max_tokens) - -def with_temperature(value, temperature): - '''This sets the sampling temperature to be used for the given portion of the grammar. - - Note that if the grammar passed to us already has some portions with a temperature - setting in place, those settings will not be overridden. 
- ''' - _re_with_temperature(value, temperature, {}) - return value - -def _re_with_temperature(grammar, temperature, visited_set): - - # don't go down the same path twice - if grammar in visited_set: - return - visited_set[grammar] = True - - # if getattr(grammar, "temperature", 100000000) > temperature: - if isinstance(grammar, Terminal) and grammar.temperature < 0: # only need to set temp for terminals - grammar.temperature = temperature - elif getattr(grammar, "temperature", 100000000) > temperature and hasattr(grammar, "values"): - for g in grammar.values: - _re_with_temperature(g, temperature, visited_set) - -# def model_variable(name): -# return ModelVariable(name) - -def active_role_end() -> ModelVariable: - return ModelVariable('active_role_end') - -def eos_token() -> ModelVariable: - return ModelVariable('eos_token') - -def bos_token() -> ModelVariable: - return ModelVariable('bos_token') - -_null_grammar = string('') -# def char_range(low, high): -# low_bytes = bytes(low, encoding="utf8") -# high_bytes = bytes(high, encoding="utf8") -# if len(low_bytes) > 1 or len(high_bytes) > 1: -# raise Exception("We don't yet support multi-byte character ranges!") -# return ByteRange(low_bytes + high_bytes) -def str_to_grammar(value: str): - is_id = False - parts = re.split(_tag_pattern, value) - - # we have no embedded objects - if len(parts) == 1: - return string(value) - - # if we have embedded objects we have to convert the string to a grammar tree - else: - partial_grammar = _null_grammar - # lm.suffix = "" - for i,part in enumerate(parts): - # if i < len(parts) - 1: - # lm.suffix = parts[i+1] - if is_id: - call = _call_pool[part] - if isinstance(call, GrammarFunction): - partial_grammar += _call_pool[part] - else: - partial_grammar = RawFunction(lambda lm, g, call: call(lm + g), partial_grammar, _call_pool[part]) - # lm += partial_grammar - # lm = _call_pool[part](lm) - # partial_grammar = _null_grammar - elif part != "": - partial_grammar += string(part) - is_id = not is_id - return partial_grammar \ No newline at end of file diff --git a/controllers/guidance_ctrl/Lib/guidance/_json_schema_to_grammar.py b/controllers/guidance_ctrl/Lib/guidance/_json_schema_to_grammar.py deleted file mode 100644 index 2ca0d724..00000000 --- a/controllers/guidance_ctrl/Lib/guidance/_json_schema_to_grammar.py +++ /dev/null @@ -1,123 +0,0 @@ -import json -from typing import Dict - -from ._grammar import Byte, GrammarFunction, Join, Select, select -from .library._char_range import char_range - -_QUOTE = Byte(b'"') -_SAFE_STRING = select( - [ - char_range("a", "z"), - char_range("A", "Z"), - char_range("0", "9"), - *[c for c in "-_' ,.!?/[]{}():;"], - "\\n", - "\\t", - "\\\\", - ], - recurse=True, -) -_OPEN_BRACE = Byte(b"{") -_CLOSE_BRACE = Byte(b"}") -_OPEN_BRACKET = Byte(b"[") -_CLOSE_BRACKET = Byte(b"]") -_COMMA = Byte(b",") -_COLON = Byte(b":") - - -def _make_optional(f: GrammarFunction) -> GrammarFunction: - return select(["", f]) - - -def _process_int() -> GrammarFunction: - return Join([select(["-", ""]), select([char_range("0", "9")], recurse=True)]) - - -def _process_number() -> GrammarFunction: - mantissa_int = _process_int() - mantissa_frac = _make_optional( - Join([Byte(b"."), select([char_range("0", "9")], recurse=True)]) - ) - exponent = _make_optional( - Join( - [ - "e", - # Since the exponent can contain a '+', can't just reuse - # _process_int() here - select(["", "-", "+"]), - select([char_range("0", "9")], recurse=True), - ] - ) - ) - return Join( - [ - mantissa_int, - mantissa_frac, - 
exponent, - ], - ) - - -def _process_object(schema_properties: Dict[str, any]) -> GrammarFunction: - properties = [] - for name, nxt_node in schema_properties.items(): - nxt = Join( - [ - Join([_QUOTE, name, _QUOTE]), - _COLON, - _process_node(nxt_node), - _COMMA if len(properties) + 1 < len(schema_properties) else "", - ] - ) - properties.append(nxt) - return Join([_OPEN_BRACE, *properties, _CLOSE_BRACE]) - - -def _process_array(item_node: Dict[str, any]) -> GrammarFunction: - return Join( - [ - _OPEN_BRACKET, - _make_optional( - # One or more items - Join( - [ - select( - ["", Join([_process_node(item_node), _COMMA])], - recurse=True, - ), - _process_node(item_node), - ] - ) - ), - _CLOSE_BRACKET, - ] - ) - - -def _process_node(node: Dict[str, any]) -> GrammarFunction: - if node["type"] == "null": - # Not completely sure about this - return Select(["null"]) - elif node["type"] == "string": - return Join([_QUOTE, _SAFE_STRING, _QUOTE]) - elif node["type"] == "boolean": - return select(["true", "false"]) - elif node["type"] == "integer": - return _process_int() - elif node["type"] == "number": - return _process_number() - elif node["type"] == "object": - return _process_object(node["properties"]) - elif node["type"] == "array": - item_node = dict(type=node["items"]["type"]) - if item_node["type"] == "object": - item_node["properties"] = node["items"]["properties"] - return _process_array(item_node) - else: - raise ValueError(f"Unsupported type in schema: {node['type']}") - - -def json_schema_to_grammar(schema: str) -> GrammarFunction: - schema_obj = json.loads(schema) - - return _process_node(schema_obj) diff --git a/controllers/guidance_ctrl/Lib/guidance/_parser.py b/controllers/guidance_ctrl/Lib/guidance/_parser.py deleted file mode 100644 index 5d9704e6..00000000 --- a/controllers/guidance_ctrl/Lib/guidance/_parser.py +++ /dev/null @@ -1,545 +0,0 @@ -from sys import stderr -import numpy as np -from ordered_set import OrderedSet -from ._grammar import Join, Select, Terminal, Null, Byte, ByteRange - - -class ParserException(Exception): - def __init__(self, *args, **kwargs): - self.current_byte = kwargs.pop("current_byte", None) - self.allowed_bytes = kwargs.pop("allowed_bytes", None) - super().__init__(*args, **kwargs) - - -class EarleyItem: - __slots__ = ("node", "values", "start", "pos", "log_prob", "children", "hidden_start") - - def __init__(self, node, values, pos, start, log_prob, hidden_start): - self.node = node - self.values = values - self.start = start - self.pos = pos - self.log_prob = log_prob - self.children = None - self.hidden_start = hidden_start - - def __eq__(self, other): - return isinstance(other, EarleyItem) and \ - self.start == other.start and \ - self.pos == other.pos and \ - self.node == other.node and \ - self.values == other.values and \ - self.log_prob == other.log_prob - - def __hash__(self): - return hash((self.node, self.values, self.start, self.pos)) - - def __repr__(self): - if isinstance(self.node, Join): - s = f"{self.node.name:20} -> " - rs = "" - for i,v in enumerate(self.values): - if self.pos == i: - rs += "•" - rs += v.name + " " - if self.pos == len(self.values): - rs += "•" - elif isinstance(self.node, Select): - s = f"{self.node.name:20} -> " - rs = "" - if self.pos == 0: - rs += "•" - rs += self.values[0].name - if self.pos == 1: - rs += "•" - else: - assert False - return s + f"{rs:40} ({self.start}) {'nullable' if self.node.nullable else ''}" - -class Parser: - '''An abstract base class for guidance parsers.''' - pass - -class 
EarleyCommitParser(Parser): - def __init__(self, grammar): - - # we can't have a terminal as the root - if isinstance(grammar, Terminal): - grammar = Join([grammar]) - - self.grammar = grammar - self.bytes = b'' - self.state_sets = [OrderedSet()] # the list of Earley items for each byte - self.token_counts = [] # used to track how many tokens have been used - self.state_set_pos = 0 - self.shadow_pos = 0 - self._add_node(self.grammar, 0, 0.0, 1000000000) - self._inner_loop(self.state_set_pos) - - @property - def pos(self): - return self.shadow_pos - @pos.setter - def pos(self, new_pos): - - # do nothing if we aren't moving - if new_pos == self.state_set_pos: - return - elif new_pos > self.state_set_pos: - raise ParserException("Can't move the parser position forward! (only backward)") - - # check if we are just moving the shadow position - if new_pos >= self.shadow_pos: - self.shadow_pos = new_pos - return - - # actually reset our position if we need to - self.state_sets = self.state_sets[:new_pos+1] + [OrderedSet()] - self.token_counts = self.token_counts[:new_pos+2] - self.bytes = self.bytes[:new_pos] - self.state_set_pos = new_pos - self.shadow_pos = new_pos - self._inner_loop(self.state_set_pos) - - def _add_item(self, state_set_pos, new_item): - state_set = self.state_sets[state_set_pos] - if new_item not in state_set: - state_set.append(new_item) - else: - existing_item = state_set.items[state_set.map[new_item]] - existing_item.hidden_start = min(existing_item.hidden_start, new_item.hidden_start) - - def _add_node(self, grammar, state_set_pos, log_prob, hidden_start): - if isinstance(grammar, Terminal): - new_item = EarleyItem(grammar, tuple(), 0, state_set_pos, log_prob, hidden_start) - self._add_item(state_set_pos, new_item) - - elif isinstance(grammar, Join): - new_item = EarleyItem(grammar, tuple(grammar.values), 0, state_set_pos, log_prob, hidden_start) - self._add_item(state_set_pos, new_item) - - elif isinstance(grammar, Select): - for value in grammar.values: - new_item = EarleyItem(grammar, (value,), 0, state_set_pos, log_prob, hidden_start) - self._add_item(state_set_pos, new_item) - - def _inner_loop(self, state_set_pos, start_pos=0): - curr_state_set = self.state_sets[state_set_pos] - if len(self.state_sets) == state_set_pos + 1: - self.state_sets.append(OrderedSet()) - self.token_counts.append(self.token_counts[-1] if len(self.token_counts) > 0 else 0) - next_state_set = self.state_sets[state_set_pos + 1] - pos = start_pos - while len(curr_state_set) > pos: - item = curr_state_set[pos] - - # completion - if item.pos == len(item.values): - - # if we complete an item that is a "commit point" then we eliminate all other possible - # parses so that we are "committed" to using this item - # we do this by removing any unprocessed items in the current state set and clearing the next state set - if item.node.commit_point: - while len(curr_state_set) > pos: - - # if we find another valid commit point that starts earlier we use that instead - # this causes us to pick the longest matching valid commit point - end_item = curr_state_set[-1] - if end_item.node.commit_point and end_item.pos == len(end_item.values) and end_item.start < item.start: - item = end_item - - curr_state_set.pop() - curr_state_set.append(item) # we append the current item again (we do this since we may have swapped it out above) - next_state_set.clear() - - # advance all the parents that our completion impacts - token_span = self.token_counts[state_set_pos] - self.token_counts[item.start] - start_state_set = 
self.state_sets[item.start] - for start_item in start_state_set: - if start_item.pos < len(start_item.values) and start_item.values[start_item.pos] == item.node: - - # if item.node.max_tokens <= token_span and any(start_item.node == v and len(v.values) > 1 for v in item.node.values): - # continue # skip advancing parents that are also children (recursion) once we are past the token limit - - curr_state_set.append(EarleyItem( - start_item.node, - start_item.values, - start_item.pos + 1, - start_item.start, - start_item.log_prob + item.log_prob, # increment the log prob by the child value, - start_item.hidden_start - )) - - # don't advance past our max token limit - elif item.node.max_tokens > self.token_counts[state_set_pos] - self.token_counts[item.start]: - - # scan (note we only scan forward when we have more max token headroom left) - next_item_node = item.values[item.pos] - hidden_start = item.hidden_start - if next_item_node.hidden: - hidden_start = min(state_set_pos, hidden_start) - if isinstance(next_item_node, Terminal):# and item.node.max_tokens > self.token_counts[state_set_pos] - self.token_counts[item.start]: - next_state_set.append(EarleyItem(item.node, item.values, item.pos + 1, item.start, item.log_prob, hidden_start)) # the log prob will get incremented when consume_bytes is called - - # prediction - else: - self._add_node(next_item_node, state_set_pos, 0.0, hidden_start) # the log probs will get incremented by children later - - # handle nullable items by advancing them automatically (since we know we can) - if next_item_node.nullable: - new_item = EarleyItem(item.node, item.values, item.pos + 1, item.start, item.log_prob, item.hidden_start) - if new_item not in self.state_sets[state_set_pos]: - self.state_sets[state_set_pos].append(new_item) - pos += 1 - - def earliest_hidden_start(self, state_pos=None): - '''The earliest that a hidden node might match. - - This is useful because it tells us which bytes may end being hidden. - ''' - if state_pos is None: - state_pos = self.state_set_pos - earliest_pos = 10000000000 - for item in self.state_sets[state_pos]: - earliest_pos = min(earliest_pos, item.hidden_start) - return earliest_pos - - def matched(self): - '''Checks if the parser has completely matched the grammar.''' - if self.shadow_pos != self.state_set_pos: - return False - for item in self.state_sets[self.state_set_pos]: - if item.node == self.grammar and item.pos == len(item.values): - return True - return False - - def shadow_rewind(self, new_pos): - if new_pos == self.state_set_pos: - return - self.shadow_pos = new_pos - - def commit_and_collapse_item(self, item): - '''This collapses the item into zero width and rewinds the parser position accordingly. - - Note we assume the item is in the current state set. 
- ''' - - # trim off the state sets that match this item - self.state_sets = self.state_sets[:item.start + 1] - self.token_counts = self.token_counts[:item.start + 1] - self.bytes = self.bytes[:item.start] - self.state_set_pos = item.start - self.shadow_pos = item.start - - # add this state to its start point (making it a zero length match with no values) - self.state_sets[item.start].append(EarleyItem(item.node, tuple(), 0, item.start, item.log_prob, item.hidden_start)) - - # expand from this state - self._inner_loop(item.start, len(self.state_sets[item.start]) - 1) - - def mark_new_token(self): - # TODO: we allow ourselves to go one past our max token limit when we hit a one-byte token - # because we don't know if we are continuing an old token or starting a new one when we parse - # the first byte of the token. We could fix this by rerunning the inner_loop after each - # token, but we skip that for now since max_tokens is not a hard guarantee anyway when you - # have patterns. - - self.token_counts[-1] += 1 - - def consume_byte(self, byte, log_prob=0.0): - '''Advances the parser by the given byte.''' - - # see if we need to advance our shadow position... - if self.shadow_pos < self.state_set_pos: - assert byte == self.bytes[self.shadow_pos:self.shadow_pos+1], "Attempted to consume a byte by advancing shadow_pos but the byte didn't match!" - self.shadow_pos += 1 - return - - # ...if not, we extend our bytes - self.bytes += byte - - # filter out all the extensions that don't match this byte - new_next_state_set = [] - found_valid = False - found_invalid = False - hidden_start = 10000000000 - for item in self.state_sets[self.state_set_pos + 1]: - token_span = self.token_counts[-1] - self.token_counts[item.start] - if item.node.max_tokens <= token_span: - found_invalid = True - continue - elif item.pos > 0 and isinstance(item.values[item.pos - 1], Terminal): - last_inner_node = item.values[item.pos - 1] - if not last_inner_node.match_byte(byte): - found_invalid = True - continue - else: - found_valid = True - if last_inner_node.commit_point: - item.log_prob += log_prob - new_next_state_set = [item] - hidden_start = min(hidden_start, item.hidden_start) - found_invalid = True # we make everything else invalid, so that means we found something invalid - break - item.log_prob += log_prob # update the probability of the item by the probability of choosing this byte - new_next_state_set.append(item) - hidden_start = min(hidden_start, item.hidden_start) - if not found_valid: - raise ParserException("Attempted to consume a byte that the grammar does not accept!", current_byte=byte) - if found_invalid: # only update if we changed the set - self.state_sets[self.state_set_pos + 1] = OrderedSet(new_next_state_set) - - # advance the parser one position - self.state_set_pos += 1 - self.shadow_pos += 1 - self._inner_loop(self.state_set_pos) - - # look for a commit point node - commit_point = None - for item in self.state_sets[self.state_set_pos]: - if item.node.commit_point and item.pos == len(item.values) or (item.pos > 0 and item.values[item.pos-1].commit_point): - commit_point = item - break # TODO: consider how we might need to prioritize multiple commit point nodes (an uncommon scenario I think) - # hidden_start, - return commit_point - - def valid_next_bytes(self): - '''A list of Byte and ByteRange objects representing the next valid bytes.''' - valid_items = set() - next_state_set = self.state_sets[self.state_set_pos + 1] - for item in next_state_set: - token_span = self.token_counts[-1] - 
self.token_counts[item.start] - if item.node.max_tokens <= token_span: - continue - elif item.pos > 0 and isinstance(item.values[item.pos - 1], Terminal): - v = item.values[item.pos - 1] - if v not in valid_items: - valid_items.add(v) - return valid_items - - def next_byte_temperature(self): - '''The maximum temperature over all the next bytes, or -1 if no temperature is set.''' - max_temp = -1 - next_state_set = self.state_sets[self.state_set_pos + 1] - for item in next_state_set: - if item.pos > 0 and isinstance(item.values[item.pos - 1], Terminal): - v = item.values[item.pos - 1] - max_temp = max(max_temp, v.temperature) - return max_temp - - def next_byte_mask(self): - '''A mask version of the `valid_next_bytes` method.''' - - mask = np.zeros(256, dtype=bool) - - # if we are shadow rewound then we just force those bytes again - if self.shadow_pos < self.state_set_pos: - mask[self.bytes[self.shadow_pos]] = True - - # otherwise we compute the valid bytes from the grammar - else: - valid_items = self.valid_next_bytes() - for item in valid_items: - if isinstance(item, Byte): - mask[item.byte[0]] = True - elif isinstance(item, ByteRange): - mask[item.byte_range[0]:item.byte_range[1]+1] = True - else: - raise ParserException("Unknown Terminal Type: " + str(type(item)), ) - return mask - - def __repr__(self, state_sets=None) -> str: - s = "" - if state_sets is None: - state_sets = self.state_sets - for i,states in enumerate(state_sets): - s += f"\n=== {i} ===" - if self.state_set_pos == i: - s += " (state_set_pos)" - s += "\n" - for state in states: - if isinstance(state.node, Join): - s += f"{state.node.name:20} -> " - rs = "" - for i,v in enumerate(state.values): - if state.pos == i: - rs += "•" - rs += v.name + " " - if state.pos == len(state.values): - rs += "•" - elif isinstance(state.node, Select): - s += f"{state.node.name:20} -> " - rs = "" - if state.pos == 0: - rs += "•" - if len(state.values) == 0: - rs += "NO_VALUES!" 
- else: - rs += state.values[0].name - if state.pos == 1: - rs += "•" - else: - assert False - s += f"{rs:40} ({state.start}) {'nullable' if state.node.nullable else ''}\n" - return s - - def _reversed_state_sets(self): - new_state_sets = [OrderedSet([]) for _ in range(len(self.state_sets))] - for i,states in enumerate(self.state_sets): - for state in states: - # if state.node.name == "__call___c": - # pass - new_state_sets[state.start].append(EarleyItem(state.node, state.values, state.pos, i, state.log_prob, state.hidden_start)) - - return new_state_sets - - def parse_tree(self): - reversed_state_sets = self._reversed_state_sets() - root_item = None - - # find the matching root state - for item in reversed_state_sets[0]: - if item.node == self.grammar and item.start == len(self.bytes) and item.pos == len(item.values): # note that ".start" mean end because items are reversed - root_item = item - if root_item is None: - return None - self._compute_parse_tree(0, root_item, reversed_state_sets) - return root_item - - def get_captures(self, data=None, log_prob_data=None): - root_node = self.parse_tree() - if data is None: - data = {} - if log_prob_data is None: - log_prob_data = {} - if root_node is not None: - # parse complete, so we can get the captures - self._record_captures_from_root(root_node, data, log_prob_data) - return data, log_prob_data - # compute on partially parsed tree - self._record_captures_partial(data, log_prob_data) - return data, log_prob_data - - def _record_captures_partial(self, data, log_prob_data): - byte_data = self.bytes - - for item in self.state_sets[self.state_set_pos]: - cname = item.node.capture_name - if cname is None: - continue - captured_value = byte_data[item.start:self.earliest_hidden_start()] - if captured_value.endswith(b'<'): - print("WARNING: Captured value ends with '<' which is a special character in the parser!", file=stderr) - data[cname] = captured_value - log_prob_data[cname] = item.log_prob - - def _record_captures_from_root(self, initial_item, data, log_prob_data): - byte_data = self.bytes - stack = [(initial_item, 0)] - used_names = set() # track which capture names have been used so self-recursive children don't overwrite their parents - - while stack: - item, byte_pos = stack.pop() - # terminal nodes - if isinstance(item, Terminal): - - # if we are at a capture group node then we save the matched terminal byte - if item.capture_name is not None: - data[item.capture_name] = item.byte - log_prob_data[item.capture_name] = 0 - - # internal nodes - else: - start_byte_pos = byte_pos - - # recurse for all our non-null children - for child in item.children: - if child is not None: - stack.append((child, byte_pos)) - # _record_captures(child, data, log_prob_data, byte_data, byte_pos) - if isinstance(child, Terminal): - byte_pos += len(child) - else: - byte_pos = child.start # note that "start" means "end" since this is a reversed state set - - # if we are at a capture group node then we save the matched bytes range - # note that we record this after calling our children so that we save the outermost version of self-recursive calls - cname = item.node.capture_name - if cname is not None and cname not in used_names and not item.node.hidden: - - # see if we are doing a list append - if cname.startswith("__LIST_APPEND:"): - cname = cname[14:] # trim off the list append tag - if cname not in data or not isinstance(data[cname], list): - data[cname] = [] - log_prob_data[cname] = [] - data[cname].append(byte_data[start_byte_pos:item.start]) - 
log_prob_data[cname].append(item.log_prob) - - # or just a regular assignment - else: - data[cname] = byte_data[start_byte_pos:item.start] # note that "start" means "end" since this is a reversed state set - log_prob_data[cname] = item.log_prob - - used_names.add(cname) - - def _compute_parse_tree(self, initial_pos, initial_item, reversed_state_sets): - stack = [(initial_pos, initial_item)] - - while stack: - pos, item = stack.pop() - - # compute the children for this item - assert self._compute_children(pos, item, reversed_state_sets) - - # recurse on the children - for child in item.children: - if child is None: - pass # this child was nullable and was chosen to be null (empty) - elif isinstance(child, Terminal): - pos += len(child) - else: - stack.append((pos, child)) - pos = child.start # note that ".start" means end because items are reversed - - def _compute_children(self, state_set_pos, item, reversed_state_sets, values_pos = 0): - - # ensure we have a children array - if item.children is None: - item.children = [None for _ in range(len(item.values))] - - # consume as many terminal children as possible - while True: - - # if we are at the end of the values then there are no more children and we see if we consumed all the right bytes - if values_pos == len(item.values): - return state_set_pos == item.start # note that ".start" means end because items are reversed - - # get the child we are trying to match (meaning we are looking for completed Earley items for this node) - value = item.values[values_pos] - - # if we have a terminal node we can jump forward that many bytes - if isinstance(value, Terminal): - item.children[values_pos] = value - values_pos += 1 - state_set_pos += len(value) - else: - break - - # otherwise we need to try all possible next matching items in the current state set - # so we loop over every item in the current state set looking for a completed match - for inner_item in reversed_state_sets[state_set_pos]: - if inner_item.node == value and inner_item.pos == len(inner_item.values): - - # see if we can get a complete parse following this inner item - if self._compute_children(inner_item.start, item, reversed_state_sets, values_pos + 1): - item.children[values_pos] = inner_item - return True - - # if we didn't find a child set and this is nullable we can skip this child (since it may not exist if nulled) - if value.nullable: - if self._compute_children(state_set_pos, item, reversed_state_sets, values_pos + 1): - item.children[values_pos] = None # this child was skipped since it was nullable - return True - - return False diff --git a/controllers/guidance_ctrl/Lib/guidance/_server.py b/controllers/guidance_ctrl/Lib/guidance/_server.py deleted file mode 100644 index 936f4eb5..00000000 --- a/controllers/guidance_ctrl/Lib/guidance/_server.py +++ /dev/null @@ -1,89 +0,0 @@ -from fastapi import FastAPI, Request, HTTPException, Security -from fastapi.security import APIKeyHeader -from fastapi.responses import StreamingResponse -import os # For environment variables or config files -import base64 - -from .models._model import Model, Engine -from ._grammar import GrammarFunction - -from pydantic import BaseModel, Field - - -class GuidanceRequest(BaseModel): - parser: str = Field( - title="parser", description="The text generated so far by the guidance program" - ) - grammar: str = Field( - title="grammar", - description="Guidance grammar to constrain the next characters generated", - ) - - -class Server: - def __init__(self, engine, api_key=None, ssl_certfile=None, 
ssl_keyfile=None): - """This exposes an Engine object over the network.""" - - if isinstance(engine, Model): - engine = engine.engine - elif not isinstance(engine, Engine): - raise TypeError("engine must be an Engine object") - self.engine = engine - self.app = FastAPI() - self.valid_api_keys = self._load_api_keys(api_key) - if ssl_certfile is None: - ssl_certfile = os.getenv("GUIDANCE_SSL_CERTFILE") - if ssl_keyfile is None: - ssl_keyfile = os.getenv("GUIDANCE_SSL_KEYFILE") - self.ssl_certfile = ssl_certfile - self.ssl_keyfile = ssl_keyfile - - api_key_header = APIKeyHeader(name="x-api-key", auto_error=False) - - # def get_api_key(api_key_header: str = Security(api_key_header)) -> str: - # if api_key_header in self.valid_api_keys: - # return api_key_header - # raise HTTPException( - # status_code=status.HTTP_401_UNAUTHORIZED, - # detail="Invalid or missing API Key", - # ) - - @self.app.post("/extend") - async def extend_parser( - guidance_request: GuidanceRequest, x_api_key: str = Security(api_key_header) - ): - if x_api_key not in self.valid_api_keys: - raise HTTPException(status_code=401, detail="Invalid API key") - - # data = await request.json() - # parser = data.get("parser") - grammar = GrammarFunction.deserialize( - base64.b64decode(guidance_request.grammar) - ) - - return StreamingResponse( - self.engine(guidance_request.parser, grammar), - media_type="application/json", - ) - - def _load_api_keys(self, api_key): - valid_api_keys = set() - if api_key is None: - api_key = os.getenv("GUIDANCE_API_KEY") - if api_key: - valid_api_keys.add(api_key) - else: - valid_api_keys.add(api_key) - return valid_api_keys - - def run(self, host="localhost", port=8000): - # Use uvicorn or another ASGI server to run - import uvicorn - - uvicorn.run( - self.app, - host=host, - port=port, - ssl_certfile=self.ssl_certfile, - ssl_keyfile=self.ssl_keyfile, - ) # use host="0.0.0.0" for remote access diff --git a/controllers/guidance_ctrl/Lib/guidance/_utils.py b/controllers/guidance_ctrl/Lib/guidance/_utils.py deleted file mode 100644 index a2d52d0a..00000000 --- a/controllers/guidance_ctrl/Lib/guidance/_utils.py +++ /dev/null @@ -1,222 +0,0 @@ -import os -import requests -import inspect -import json -import asyncio -import queue -import ast -import types -import textwrap -import sys -import numpy as np - -class _Rewrite(ast.NodeTransformer): - def visit_Constant(self, node): - if isinstance(node.value, str) and node.lineno < node.end_lineno: - self.start_counts[node.lineno-1] += 1 - start_line = self.source_lines[node.lineno-1] - start_string = start_line[node.col_offset:] - - # check for literal multiline strings - if start_string.startswith("f'''") or start_string.startswith("'''") or start_string.startswith('f"""') or start_string.startswith('"""'): - - # track our indentation level - if self.indentation[node.lineno-1] is None: - indent = start_line[:len(start_line) - len(start_line.lstrip())] - for i in range(node.lineno-1, node.end_lineno): - self.indentation[i] = indent - indent = self.indentation[node.lineno-1] - - # strip indentation when it is consistent - lines = node.value.split("\n") - fail = False - new_lines = [] - for i,line in enumerate(lines): - if (i == 0 and (self.start_counts[node.lineno-1] > 1 or not start_line.endswith("\\"))) or line == "": - new_lines.append(line) - elif line.startswith(indent): - new_lines.append(line[len(indent):]) - # elif (i == 0 and line.endswith("\\")) or line == "": - # new_lines.append(line) - else: - fail = True - break - if not fail: - node.value = 
"\n".join(new_lines) - - return node -class normalize_notebook_stdout_stderr(): - '''Remaps stdout and stderr back to their normal selves from what ipykernel did to them. - - Based on: https://github.com/ipython/ipykernel/issues/795 - ''' - - def __enter__(self): - normal_stdout = sys.__stdout__.fileno() - self.restore_stdout = None - if getattr(sys.stdout, "_original_stdstream_copy", normal_stdout) != normal_stdout: - self.restore_stdout = sys.stdout._original_stdstream_copy - sys.stdout._original_stdstream_copy = normal_stdout - - normal_stderr = sys.__stderr__.fileno() - self.restore_stderr = None - if getattr(sys.stderr, "_original_stdstream_copy", normal_stderr) != normal_stderr: - self.restore_stderr = sys.stderr._original_stdstream_copy - sys.stderr._original_stdstream_copy = normal_stderr - - def __exit__(self, exc_type, exc_value, traceback): - if self.restore_stdout is not None: - sys.stderr._original_stdstream_copy = self.restore_stdout - if self.restore_stderr is not None: - sys.stderr._original_stdstream_copy = self.restore_stderr - -def strip_multiline_string_indents(f): - - source = textwrap.dedent(inspect.getsource(f)) - blanks = '\n' * f.__code__.co_firstlineno # padd the source so the lines in the file line up for the debugger - source = blanks + '\n'.join(source.splitlines()[1:]) # remove the decorator first line. - - # define the external closure variables so f.__closure__ will match our recompiled version - if len(f.__code__.co_freevars) > 0: - raise Exception("You currently must use @guidance(dedent=False) for closure functions (function nested within other functions that reference the outer functions variables)!") - lines = source.split("\n") - lines[0] = "def __outer__closure_wrap():" - lines[1] = " " + ",".join(f.__code__.co_freevars) + " = " + ",".join("None" for _ in f.__code__.co_freevars) - source = " \n".join(lines) # TODO: this does not quite work because new_code_obj is now the __outer__closure_wrap() function...could be fixed with work... - - old_code_obj = f.__code__ - old_ast = ast.parse(source) - r = _Rewrite() - r.source_lines = source.split("\n") - r.indentation = [None for l in r.source_lines] - r.start_counts = [0 for l in r.source_lines] - # r._avoid_backslashes = True - new_ast = r.visit(old_ast) - new_code_obj = compile(new_ast, old_code_obj.co_filename, 'exec') - - # find the code block - for i in range(len(new_code_obj.co_consts)): - if str(type(new_code_obj.co_consts[i])) == "": - break - - # create a new function based on the modified code - new_f = types.FunctionType( - new_code_obj.co_consts[i], - f.__globals__, - name=f.__name__, - argdefs=f.__defaults__, - closure=f.__closure__ - ) - new_f.__kwdefaults__ = f.__kwdefaults__ - return new_f - -class CaptureEvents(): - """Creates a scope where all the events are captured in a queue. - - Note that this does not stop the events from being captured by higher level scopes. 
- """ - def __init__(self, lm): - self.lm = lm - - def __enter__(self): - self.lm._event_queue = queue.Queue() - return self.lm._event_queue - - def __exit__(self, type, value, traceback): - self.lm._event_queue = None - -class JupyterComm(): - def __init__(self, target_id, ipython_handle, callback=None, on_open=None, mode="register"): - from ipykernel.comm import Comm - - self.target_name = "guidance_interface_target_" + target_id - # print("TARGET NAME", self.target_name) - self.callback = callback - self.jcomm = None - self.ipython_handle = ipython_handle - self.addd = 1 - self.send_queue = asyncio.Queue() - self.open_event = asyncio.Event() - self.is_open = False - asyncio.get_event_loop().create_task(self._send_loop()) - if mode == "register": - # log("REGISTERING", self.target_name) - # asyncio.get_event_loop().create_task(self._register()) - def comm_opened(comm, open_msg): - # log("OPENED") - self.addd = 2 - self.jcomm = comm - self.is_open = True - self.jcomm.on_msg(self._fire_callback) - self.open_event.set() - self._fire_callback({"content": {"data": {"event": "opened"}}}) - - self.ipython_handle.kernel.comm_manager.register_target(self.target_name, comm_opened) - # get_ipython().kernel.comm_manager.register_target(self.target_name, comm_opened) # noqa: F821 - elif mode == "open": - # log("OPENING", self.target_name) - self.jcomm = Comm(target_name=self.target_name) - self.jcomm.on_msg(self._fire_callback) - # self._fire_callback({"content": {"data": "opened"}}) - else: - raise Exception("Passed mode must be either 'open' or 'register'!") - - def clear_send_queue(self): - while not self.send_queue.empty(): - self.send_queue.get_nowait() - self.send_queue.task_done() - - def _fire_callback(self, msg): - self.callback(msg["content"]["data"]) - - def send(self, data): - self.send_queue.put_nowait(data) - - async def _send_loop(self): - while True: - # log("SENDING_LOOP") - if self.jcomm is None: - self.open_event.clear() - await self.open_event.wait() - data = await self.send_queue.get() - # log("SENDING_LOOP got one!") - self.jcomm.send({"data": json.dumps(data)}) - - # async def _waiting_send(self, data): - # #log("SENDING", self.jcomm, data) - - # # await the open event if needed - # if self.jcomm is None: - # self.open_event.clear() - # await self.open_event.wait() - # #log("SENDING_now", self.jcomm, data) - # self.jcomm.send({"data": json.dumps(data)}) # we encode the JSON so iPython doesn't mess it up - - -# https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook -def is_interactive(): - import __main__ as main - return not hasattr(main, '__file__') - - -def log_softmax(array: np.ndarray, axis: int = -1) -> np.ndarray: - # https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.log_softmax.html - array_maxs: np.ndarray = np.amax(array, axis=axis, keepdims=True) - if array_maxs.ndim > 0: - array_maxs[~np.isfinite(array_maxs)] = 0 - elif not np.isfinite(array_maxs): - array_maxs = 0 - subtract_maxs = array - array_maxs - exp = np.exp(subtract_maxs) - # suppress warnings about log of zero - with np.errstate(divide='ignore'): - summed = np.sum(exp, axis=axis, keepdims=True) - out = np.log(summed) - return subtract_maxs - out - - -def softmax(array: np.ndarray, axis: int = -1) -> np.ndarray: - # https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.softmax.html - array_maxs = np.amax(array, axis=axis, keepdims=True) - exp_x_shifted = np.exp(array - array_maxs) - return exp_x_shifted / np.sum(exp_x_shifted, 
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/__init__.py b/controllers/guidance_ctrl/Lib/guidance/library/__init__.py
deleted file mode 100644
index 3afe63bf..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# import functions that can be called directly
-from ._gen import gen, call_tool, will_gen
-from ._image import image
-
-# core grammar functions
-from .._grammar import select
-from .._grammar import commit_point
-from .._grammar import with_temperature
-from .._grammar import string
-from .._grammar import token_limit
-from .._grammar import capture
-from .._grammar import byte_range
-
-# context blocks
-from ._block import block
-from ._role import role, system, assistant, user, function, instruction, indent_roles
-from ._format import monospace
-from ._silent import silent
-from ._set_var import set_var
-from ._set_attribute import set_attribute
-# from ..models._model import context_free
-
-# stateless library functions
-from ._any_char import any_char
-from ._zero_or_more import zero_or_more
-from ._one_or_more import one_or_more
-from ._char_range import char_range
-from ._char_set import char_set
-from ._prefix_tree import prefix_tree
-from ._substring import substring
-from ._regex import regex
-from ._optional import optional
-from ._tool import Tool
-from ._any_char_but import any_char_but
\ No newline at end of file
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/_any_char.py b/controllers/guidance_ctrl/Lib/guidance/library/_any_char.py
deleted file mode 100644
index 1e928db8..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/_any_char.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import guidance
-from .._grammar import byte_range
-
-@guidance(stateless=True)
-def any_char(lm):
-    # TODO: extend this to support utf-8 encoded multibyte unicode characters
-    return lm + byte_range(b'\x00', b'\xff')
\ No newline at end of file
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/_any_char_but.py b/controllers/guidance_ctrl/Lib/guidance/library/_any_char_but.py
deleted file mode 100644
index 58ed097b..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/_any_char_but.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import guidance
-from .._grammar import byte_range, select
-
-@guidance(stateless=True)
-def any_char_but(lm, forbidden):
-    """Allows any char except those in forbidden"""
-    # TODO: extend this to support utf-8 encoded multibyte unicode characters
-    forb = sorted(set([ord(x) for x in forbidden]))
-    start = 0
-    ranges = []
-    for i in forb:
-        if i == 0:
-            continue
-        newrange = (start, i - 1)
-        if newrange[0] < newrange[1]:
-            ranges.append(newrange)
-        start = i + 1
-    if start < 127:
-        ranges.append((start, 127))
-    ranges = [(i.to_bytes(1, 'big'), j.to_bytes(1, 'big')) for i, j in ranges]
-    return select([byte_range(x[0], x[1]) for x in ranges])
\ No newline at end of file
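The range arithmetic in any_char_but is compact enough that a standalone trace of just that computation (a sketch with no guidance imports) shows what select() ends up ranging over. Note that it reproduces the original's behavior of dropping width-one gaps between forbidden bytes:

def complement_ranges(forbidden: str) -> list[tuple[int, int]]:
    # same walk as any_char_but: split 0..127 at each forbidden byte
    forb = sorted(set(ord(x) for x in forbidden))
    start, ranges = 0, []
    for i in forb:
        if i == 0:
            continue
        if start < i - 1:  # mirrors `newrange[0] < newrange[1]` above
            ranges.append((start, i - 1))
        start = i + 1
    if start < 127:
        ranges.append((start, 127))
    return ranges

print(complement_ranges("\n"))  # [(0, 9), (11, 127)] -- everything but LF
print(complement_ranges("az"))  # [(0, 96), (98, 121), (123, 127)]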
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/_block.py b/controllers/guidance_ctrl/Lib/guidance/library/_block.py
deleted file mode 100644
index c485af2a..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/_block.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from guidance import models
-
-class ContextBlock:
-    def __init__(self, opener, closer, name=None):
-        self.opener = opener
-        self.closer = closer
-        self.name = name
-
-    def __enter__(self):
-        models.Model.open_blocks[self] = None
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        del models.Model.open_blocks[self]
-
-def block(name=None, opener="", closer=""):
-    return ContextBlock(opener, closer, name=name)
\ No newline at end of file
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/_char_range.py b/controllers/guidance_ctrl/Lib/guidance/library/_char_range.py
deleted file mode 100644
index 861412b1..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/_char_range.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .._grammar import byte_range
-
-def char_range(low, high):
-    low_bytes = bytes(low, encoding="utf8")
-    high_bytes = bytes(high, encoding="utf8")
-    if len(low_bytes) > 1 or len(high_bytes) > 1:
-        raise Exception("We don't yet support multi-byte character ranges!")
-    return byte_range(low_bytes, high_bytes)
\ No newline at end of file
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/_char_set.py b/controllers/guidance_ctrl/Lib/guidance/library/_char_set.py
deleted file mode 100644
index b59d4f28..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/_char_set.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from .._grammar import select
-from ._char_range import char_range
-
-def char_set(def_string: str):
-    parts = []
-    pos = 0
-    while pos < len(def_string):
-        if pos + 2 < len(def_string) and def_string[pos + 1] == "-":
-            parts.append(char_range(def_string[pos], def_string[pos + 2]))
-            pos += 3
-        elif pos + 1 < len(def_string) and def_string[pos] == "\\":
-            parts.append(def_string[pos + 1])
-            pos += 2
-        else:
-            parts.append(def_string[pos])
-            pos += 1
-    return select(parts)
\ No newline at end of file
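char_set's little scanner is the kind of thing a worked input clarifies. Tracing it by hand (a sketch with plain strings standing in for the grammar objects; note that the range branch is tested before the escape branch, so a dash immediately after an escaped character still triggers a range):

def char_set_parts(def_string: str) -> list[str]:
    # same scanner as char_set above, collecting strings instead of grammars
    parts, pos = [], 0
    while pos < len(def_string):
        if pos + 2 < len(def_string) and def_string[pos + 1] == "-":
            parts.append(f"range({def_string[pos]}-{def_string[pos + 2]})")
            pos += 3
        elif pos + 1 < len(def_string) and def_string[pos] == "\\":
            parts.append(def_string[pos + 1])  # escaped literal
            pos += 2
        else:
            parts.append(def_string[pos])
            pos += 1
    return parts

print(char_set_parts("a-z0-9_"))  # ['range(a-z)', 'range(0-9)', '_']
print(char_set_parts(r"\n"))      # ['n'] -- backslash escapes the next character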
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/_format.py b/controllers/guidance_ctrl/Lib/guidance/library/_format.py
deleted file mode 100644
index 1fd868bd..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/_format.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from ._block import block
-
-def monospace():
-    return block(opener="<||_html:_||>", closer="<||_html:_||>")
-
diff --git a/controllers/guidance_ctrl/Lib/guidance/library/_gen.py b/controllers/guidance_ctrl/Lib/guidance/library/_gen.py
deleted file mode 100644
index 11ec793c..00000000
--- a/controllers/guidance_ctrl/Lib/guidance/library/_gen.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import regex as regex_module
-import logging
-import guidance
-from ._silent import silent
-from .._grammar import select
-from ._zero_or_more import zero_or_more
-from .._grammar import commit_point
-from ._any_char import any_char
-from .._grammar import capture
-from ._regex import regex as regex_grammar
-from .._grammar import token_limit, eos_token, active_role_end, with_temperature
-from ._tool import Tool
-from ._block import block
-
-logger = logging.getLogger(__name__)
-
-# TODO: make this stateless!
-@guidance(stateless=lambda *args, **kwargs: kwargs.get("tools", None) is None) # TODO: uncomment this once we get temperature stateless
-def gen(lm, name=None, *, max_tokens=1000, list_append=False, regex=None,
-        tools=None, hide_tool_call=False, stop=None, stop_regex=None, suffix="", n=1, temperature=0.0, top_p=1.0,
-        save_stop_text=False):
-    """Generate a set of tokens until a given stop criterion has been met.
-
-    This function is a useful utility that lets you specify most grammars used by typical
-    LM generation programs. It also has the added ability to interleave generation with tool calls.
-
-    Parameters
-    ----------
-    name : str or None
-        If this is not None then the results of the generation will be saved as a variable on
-        the Model object (so you can access the result as `lm["var_name"]`).
-
-    max_tokens : int
-        The maximum number of generation tokens we should use. Note that this limit is not exact when
-        regular expression pattern constraints are present, but guidance does attempt to end the generation
-        as soon as possible while keeping the regex constraints satisfied.
-
-    list_append : bool
-        If this is True then the results saved to `lm[name]` will not be written directly but rather appended
-        to a list (if no list with the current name is present one will be created). This is useful for
-        building lists inside Python loops.
-
-    regex : str or None
-        This is a regular expression that will be used to constrain the generation. The model is only allowed
-        to generate tokens that match this regular expression. Note that for variable-length expressions the
-        model is free to continue the expression after a complete match, but generation will terminate as soon
-        as the model generates anything that does not match the pattern (this ending behavior may change a bit as we
-        update guidance to maintain the grammar parsing state between calls).
-
-    stop : str or list or None
-        The stop string (or list of strings) we should use for terminating this generation segment.
-
-    stop_regex : str or list or None
-        The stop regular expression (or list of regular expressions) we should use for terminating this generation segment.
-
-    save_stop_text : bool or str
-        If True then this saves the captured stop text or regex into a variable of the name `str(name) + "_stop_text"`. If
-        a string is given then the captured stop text is saved under that name.
-
-    temperature : float
-        The temperature to use during this generation call. Note that when parsing ambiguous grammars that include
-        multiple conflicting temperatures (for example from multiple possible `gen` calls inside a `select`) the highest
-        temperature of all options is used by the model (since we only want to run the model once, not once for every
-        possible parse path).
-
-    top_p : float
-        TODO! Will control the model's top_p generation parameter, but has not yet been implemented beyond top_p=1.0.
-
-    n : int
-        TODO! Will control the number of parallel generation calls made during gen.
-
-    tools : Tool or list or None
-        A list of guidance.Tool objects or Python functions (which will be converted to guidance.Tool).
-
-    hide_tool_call : bool
-        Controls if we should hide the text generated by the model to trigger a tool call. You may want to hide the tool
-        call from the model's context if you plan to change its format after the call is made.
-    """
-    # TODO: expand the tools doc string
-    assert n == 1, "We still need to add support for n>1! Consider putting your gen call in a loop for now."
-    assert top_p == 1, "We still need to add support for top_p != 1!"
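Since this docstring is the main reference being deleted, a compact usage sketch against the upstream guidance package may help. models.Mock here stands in for a real backend; any guidance Model handle should work the same way:

from guidance import models, gen

lm = models.Mock()  # hypothetical stand-in for a real model backend

# constrained generation: digits only, saved under lm["answer"]
lm += "The answer is " + gen("answer", regex=r"[0-9]+", max_tokens=20)
print(lm["answer"])

# list_append accumulates repeated captures under one name
for _ in range(3):
    lm += gen("guesses", list_append=True, max_tokens=5, stop="\n") + "\n"
print(lm["guesses"])  # a list of three strings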
-
-    logger.debug(f'start gen(name="{name}")')
-
-    # set stream if we are interactive
-    # if stream_tokens is None and not lm.is_silent() and n == 1:
-    #     stream_tokens = True
-
-    # use the suffix as the stop string if not otherwise specified
-    # TODO: still need to make suffix work with grammars
-    # eos_token = lm.eos_token.decode('utf8')
-    if stop is None and stop_regex is None and suffix != "":
-        stop = suffix
-    # if stop is None and stop_regex is None and getattr(lm, "suffix", False):
-    #     if lm.suffix.startswith("\n"):
-    #         stop = "\n"
-    #     elif lm.suffix.startswith('"') and str(lm).endswith('"'):
-    #         stop = '"'
-    #     elif lm.suffix.startswith("'") and str(lm).endswith("'"):
-    #         stop = "'"
-
-    # fall back to stopping at the EOS token
-    if stop is not False:
-        if stop is None:
-            stop = []
-        if isinstance(stop, str):
-            stop = [stop]
-        if regex is None:
-            stop.append(select([eos_token(), active_role_end()]))
-
-    if stop_regex is None:
-        stop_regex = []
-    if isinstance(stop_regex, str):
-        stop_regex = [stop_regex]
-    stop_regex = [regex_grammar(x) for x in stop_regex]
-
-    # This needs to be here for streaming
-    # if name is not None and not list_append:
-    #     lm[name] = ""
-
-    # define the generation pattern
-    if regex is not None:
-        pattern = regex_grammar(regex)
-    else:
-        pattern = zero_or_more(any_char())
-
-    tagged_name = "__LIST_APPEND:" + name if list_append and name is not None else name
-
-    # define any capture group for non-tool calls
-    if name is not None and tools is None:
-        pattern = capture(pattern, name=tagged_name)
-
-    # limit the number of tokens
-    pattern = token_limit(pattern, max_tokens)
-
-    # define the stop pattern
-    if stop is False or len(stop + stop_regex) == 0:
-        stop_pattern = ''
-    else:
-        stop_pattern = select(stop + stop_regex)
-        if save_stop_text is True:
-            save_stop_text = str(name) + "_stop_text"
-        if isinstance(save_stop_text, str):
-            stop_pattern = capture(stop_pattern, name=save_stop_text)
-        stop_pattern = commit_point(stop_pattern, hidden=True)
-
-    # single generation
-    start_pos = len(str(lm))
-    if tools is not None:
-        with block(tagged_name):
-            tools = [Tool(callable=x) if not isinstance(x, Tool) else x for x in tools]
-            init_token_count = lm.token_count
-            gen_grammar = pattern + select([stop_pattern] + [capture(commit_point(x.call_grammar, hidden=hide_tool_call), name=f'tool{i}') for i, x in enumerate(tools)])
-            while lm.token_count <= max_tokens + init_token_count:
-                lm = lm._run_stateless(gen_grammar, temperature=temperature) # TODO: we should not be using this internal method
-                tool_called = False
-                for i in range(len(tools)):
-                    tool_i = f'tool{i}'
-                    if tool_i in lm:
-                        tool_called = True
-                        lm += tools[i].tool_call()
-                        lm = lm.remove(tool_i)
-                if not tool_called:
-                    lm += suffix
-                    break
-    elif n == 1:
-        lm += with_temperature(pattern + stop_pattern + suffix, temperature)
-
-    logger.debug(f'finish gen')
-    return lm
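To make gen's stop handling above concrete, here is the same normalization traced with plain lists; strings stand in for the grammar nodes, so this is a sketch of the logic, not the grammar machinery itself:

def normalize_stops(stop, stop_regex, regex, suffix):
    # mirrors gen()'s preamble: suffix becomes the stop, EOS is the fallback
    if stop is None and stop_regex is None and suffix != "":
        stop = suffix
    if stop is not False:
        if stop is None:
            stop = []
        if isinstance(stop, str):
            stop = [stop]
        if regex is None:
            stop.append("<EOS-or-role-end>")  # stand-in for select([eos_token(), active_role_end()])
    if stop_regex is None:
        stop_regex = []
    if isinstance(stop_regex, str):
        stop_regex = [stop_regex]
    return stop + stop_regex

print(normalize_stops(None, None, None, ""))       # ['<EOS-or-role-end>']
print(normalize_stops(".", None, None, ""))        # ['.', '<EOS-or-role-end>']
print(normalize_stops(None, r"\n\n", r"\d+", ""))  # ['\\n\\n'] -- regex present, no EOS fallback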
-
-
-def click_loop_start(id, total_count, echo, color):
-    click_script = '''
-function cycle_IDVAL(button_el) {
-var i = 0;
-while (i < 50) {
-var el = document.getElementById("IDVAL_" + i);
-if (el.style.display == "inline") {
-    el.style.display = "none";
-    var next_el = document.getElementById("IDVAL_" + (i+1));
-    if (!next_el) {
-        next_el = document.getElementById("IDVAL_0");
-    }
-    if (next_el) {
-        next_el.style.display = "inline";
-    }
-    break;
-}
-i += 1;
-}
-button_el.innerHTML = (((i+1) % TOTALCOUNT) + 1) + "/" + TOTALCOUNT;
-}
-cycle_IDVAL(this);'''.replace("IDVAL", id).replace("TOTALCOUNT", str(total_count)).replace("\n", "")
-    out = f'''…1/{total_count}…'''
-    out += f"…"
-    return "<||_html:" + out + "_||>"
-
-def click_loop_mid(id, index, echo):
-    alpha = 1.0 if not echo else 0.5
-    out = f"