diff --git a/.travis.yml b/.travis.yml index 066fd15..62ecb6e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,23 +1,28 @@ sudo: required +dist: focal language: python matrix: include: - - python: 3.6 - env: TOXENV=py36 - - python: 3.7 - env: TOXENV=py37 - - python: 3.8 - env: TOXENV=py38 + - python: 3.6 + env: TOXENV=py36 + - python: 3.7 + env: TOXENV=py37 + - python: 3.8 + env: TOXENV=py38 + - python: 3.9 + env: TOXENV=py39 + - python: "3.10" + env: TOXENV=py310 services: -- docker + - docker before_install: -- make eventstore_docker + - make eventstore_docker install: -- pip install tox black + - pip install tox black script: -- make travis + - make travis after_success: -- codecov + - codecov deploy: provider: pypi user: __token__ diff --git a/README.rst b/README.rst index ba3a97b..ef372b1 100644 --- a/README.rst +++ b/README.rst @@ -17,7 +17,6 @@ Documentation is available on `Read the docs`_. Basic Usage ----------- - Working with connections ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -25,23 +24,17 @@ Usually you will want to interact with photon pump via the `~photonpump.Client` First you will need to create a connection: - >>> import asyncio >>> from photonpump import connect >>> - >>> loop = asyncio.get_event_loop() - >>> - >>> async with connect(loop=loop) as c: + >>> async with connect() as c: >>> await c.ping() The `photonpump.connect` function returns an async context manager so that the connection will be automatically closed when you are finished. Alternatively you can create a client and manage its lifetime yourself. 
- >>> import asyncio >>> from photonpump import connect >>> - >>> loop = asyncio.get_event_loop() - >>> - >>> client = connect(loop=loop) + >>> client = connect() >>> await client.connect() >>> await client.ping() >>> await client.close() diff --git a/example.py b/example.py index 8e2b93a..deee3b6 100644 --- a/example.py +++ b/example.py @@ -21,5 +21,5 @@ async def do_things(): if __name__ == "__main__": - loop = asyncio.get_event_loop() + loop = asyncio.new_event_loop() loop.run_until_complete(do_things()) diff --git a/photonpump/__init__.py b/photonpump/__init__.py index 8558086..b026b7d 100644 --- a/photonpump/__init__.py +++ b/photonpump/__init__.py @@ -41,5 +41,6 @@ def get_named_logger(cls, *names): logging.INSANE = INSANE_LEVEL_NUM from ._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/photonpump/_version.py b/photonpump/_version.py index 9c7f0ca..e532794 100644 --- a/photonpump/_version.py +++ b/photonpump/_version.py @@ -1,4 +1,3 @@ - # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). 
Distribution tarballs (built by setup.py sdist) and build @@ -58,17 +57,18 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -76,10 +76,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -116,16 +119,22 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print( + "Tried directories %s but none started with prefix %s" + % (str(rootdirs), parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -181,7 +190,7 @@ def 
git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -190,7 +199,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -198,19 +207,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") @@ -225,8 +241,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -234,10 +249,19 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + "%s*" % tag_prefix, + ], + cwd=root, + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -260,17 +284,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, 
run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -279,10 +302,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -293,13 +318,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ + 0 + ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -330,8 +355,7 @@ def render_pep440(pieces): 
rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -445,11 +469,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default @@ -469,9 +495,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } def get_versions(): @@ -485,8 +515,7 @@ def get_versions(): verbose = cfg.verbose try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass @@ -495,13 +524,16 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): + for i in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) @@ -515,6 +547,10 @@ def get_versions(): except NotThisMethod: pass - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/photonpump/compat.py b/photonpump/compat.py new file mode 100644 index 0000000..4f986ff --- /dev/null +++ b/photonpump/compat.py @@ -0,0 +1,4 @@ +import asyncio + +# python 3.6 doesn't have "get_running_loop" +get_running_loop = getattr(asyncio, "get_running_loop", asyncio.get_event_loop) diff --git a/photonpump/connection.py b/photonpump/connection.py index bc7744a..04cf9ce 100644 --- a/photonpump/connection.py +++ b/photonpump/connection.py @@ -5,11 +5,13 @@ import logging import struct import uuid +import warnings from typing import Any, NamedTuple, Optional, Sequence, Union from . import conversations as convo from . 
import messages as msg from .discovery import NodeService, get_discoverer, select_random +from photonpump.compat import get_running_loop HEADER_LENGTH = 1 + 1 + 16 SIZE_UINT_32 = 4 @@ -58,17 +61,15 @@ def __init__( ctrl_queue=None, connect_timeout=5, name=None, - loop=None, ): self.name = name self.connection_counter = 0 self.dispatcher = dispatcher - self.loop = loop or asyncio.get_event_loop() self.discovery = discovery self.connected = Event() self.disconnected = Event() self.stopped = Event() - self.ctrl_queue = ctrl_queue or asyncio.Queue(loop=self.loop) + self.ctrl_queue = ctrl_queue or asyncio.Queue() self.log = logging.get_named_logger(Connector) self._run_loop = asyncio.ensure_future(self._run()) self.heartbeat_failures = 0 @@ -134,6 +135,7 @@ async def stop(self, exn=None): self.stopped(exn) async def _attempt_connect(self, node): + loop = get_running_loop() if not node: try: self.log.debug("Performing node discovery") @@ -154,11 +156,11 @@ async def _attempt_connect(self, node): self.connection_counter, self.dispatcher, self, - self.loop, + loop, self.name, ) await asyncio.wait_for( - self.loop.create_connection(lambda: protocol, node.address, node.port), + loop.create_connection(lambda: protocol, node.address, node.port), self.connect_timeout, ) except Exception as e: @@ -340,7 +342,6 @@ def __init__( connection_number: int, output_queue: asyncio.Queue, name=None, - loop=None, ): self._logger = logging.get_named_logger(MessageWriter, name, connection_number) self.writer = writer @@ -381,9 +382,7 @@ def __init__( queue, pacemaker: PaceMaker, name=None, - loop=None, ): - self._loop = loop or asyncio.get_event_loop() self.header_bytes = array.array("B", [0] * (self.MESSAGE_MIN_SIZE)) self.header_bytes_required = self.MESSAGE_MIN_SIZE self.queue = queue @@ -401,7 +400,7 @@ def feed_data(self, data): async def start(self): """Loop forever reading messages and invoking - the operation that caused them""" + the operation that caused them""" while True: try: @@ 
-499,11 +498,10 @@ async def process(self, chunk: bytes): class MessageDispatcher: - def __init__(self, name=None, loop=None): + def __init__(self, name=None): self.active_conversations = {} self._logger = logging.get_named_logger(MessageDispatcher, name) self.output = None - self._loop = loop or asyncio.get_event_loop() async def start_conversation( self, conversation: convo.Conversation @@ -1117,7 +1115,7 @@ def __init__( PhotonPumpProtocol, self.name, connection_number ) self.transport = None - self.loop = loop or asyncio.get_event_loop() + self.loop = loop or get_running_loop() super().__init__(self.loop) self.connection_number = connection_number self.node = addr @@ -1126,11 +1124,11 @@ def __init__( def connection_made(self, transport): self._log.debug("Connection made.") - self.input_queue = asyncio.Queue(loop=self.loop) - self.output_queue = asyncio.Queue(loop=self.loop) + self.input_queue = asyncio.Queue() + self.output_queue = asyncio.Queue() self.transport = transport - stream_reader = asyncio.StreamReader(loop=self.loop) + stream_reader = asyncio.StreamReader() stream_reader.set_transport(transport) stream_writer = asyncio.StreamWriter(transport, self, stream_reader, self.loop) self.pacemaker = PaceMaker(self.output_queue, self.connector) @@ -1186,7 +1184,6 @@ async def stop(self): self.write_loop, self.dispatch_loop, self.heartbeat_loop, - loop=self.loop, return_exceptions=True, ) self.transport.close() @@ -1207,93 +1204,99 @@ def connect( selector=select_random, retry_policy=None, ) -> Client: - """ Create a new client. + """Create a new client. - Examples: - Since the Client is an async context manager, we can use it in a - with block for automatic connect/disconnect semantics. + Examples: + Since the Client is an async context manager, we can use it in a + with block for automatic connect/disconnect semantics. 
- >>> async with connect(host='127.0.0.1', port=1113) as c: - >>> await c.ping() + >>> async with connect(host='127.0.0.1', port=1113) as c: + >>> await c.ping() - Or we can call connect at a more convenient moment + Or we can call connect at a more convenient moment - >>> c = connect() - >>> await c.connect() - >>> await c.ping() - >>> await c.close() + >>> c = connect() + >>> await c.connect() + >>> await c.ping() + >>> await c.close() - For cluster discovery cases, we can provide a discovery host and - port. The host may be an IP or DNS entry. If you provide a DNS - entry, discovery will choose randomly from the registered IP - addresses for the hostname. + For cluster discovery cases, we can provide a discovery host and + port. The host may be an IP or DNS entry. If you provide a DNS + entry, discovery will choose randomly from the registered IP + addresses for the hostname. - >>> async with connect(discovery_host="eventstore.test") as c: - >>> await c.ping() + >>> async with connect(discovery_host="eventstore.test") as c: + >>> await c.ping() - The discovery host returns gossip data about the cluster. We use the - gossip to select a node at random from the avaialble cluster members. + The discovery host returns gossip data about the cluster. We use the + gossip to select a node at random from the available cluster members. - If you're using - :meth:`persistent subscriptions ` - you will always want to connect to the master node of the cluster. - The selector parameter is a function that chooses an available node from - the gossip result. To select the master node, use the - :func:`photonpump.discovery.prefer_master` function. This function will return - the master node if there is a live master, and a random replica otherwise. - All requests to the server can be made with the require_master flag which - will raise an error if the current node is not a master. 
+ If you're using + :meth:`persistent subscriptions ` + you will always want to connect to the master node of the cluster. + The selector parameter is a function that chooses an available node from + the gossip result. To select the master node, use the + :func:`photonpump.discovery.prefer_master` function. This function will return + the master node if there is a live master, and a random replica otherwise. + All requests to the server can be made with the require_master flag which + will raise an error if the current node is not a master. - >>> async with connect( - >>> discovery_host="eventstore.test", - >>> selector=discovery.prefer_master, - >>> ) as c: - >>> await c.ping(require_master=True) + >>> async with connect( + >>> discovery_host="eventstore.test", + >>> selector=discovery.prefer_master, + >>> ) as c: + >>> await c.ping(require_master=True) - Conversely, you might want to avoid connecting to the master node for reasons - of scalability. For this you can use the - :func:`photonpump.discovery.prefer_replica` function. + Conversely, you might want to avoid connecting to the master node for reasons + of scalability. For this you can use the + :func:`photonpump.discovery.prefer_replica` function. - >>> async with connect( - >>> discovery_host="eventstore.test", - >>> selector=discovery.prefer_replica, - >>> ) as c: - >>> await c.ping() + >>> async with connect( + >>> discovery_host="eventstore.test", + >>> selector=discovery.prefer_replica, + >>> ) as c: + >>> await c.ping() - For some operations, you may need to authenticate your requests by - providing a username and password to the client. + For some operations, you may need to authenticate your requests by + providing a username and password to the client. 
- >>> async with connect(username='admin', password='changeit') as c: - >>> await c.ping() + >>> async with connect(username='admin', password='changeit') as c: + >>> await c.ping() - Ordinarily you will create a single Client per application, but for - advanced scenarios you might want multiple connections. In this - situation, you can name each connection in order to get better logging. + Ordinarily you will create a single Client per application, but for + advanced scenarios you might want multiple connections. In this + situation, you can name each connection in order to get better logging. - >>> async with connect(name="event-reader"): - >>> await c.ping() + >>> async with connect(name="event-reader"): + >>> await c.ping() - >>> async with connect(name="event-writer"): - >>> await c.ping() + >>> async with connect(name="event-writer"): + >>> await c.ping() - Args: - host: The IP or DNS entry to connect with, defaults to 'localhost'. - port: The port to connect with, defaults to 1113. - discovery_host: The IP or DNS entry to use for cluster discovery. - discovery_port: The port to use for cluster discovery, defaults to 2113. - username: The username to use when communicating with eventstore. - password: The password to use when communicating with eventstore. - loop:An Asyncio event loop. - selector: An optional function that selects one element from a list of - :class:`photonpump.disovery.DiscoveredNode` elements. + Args: + host: The IP or DNS entry to connect with, defaults to 'localhost'. + port: The port to connect with, defaults to 1113. + discovery_host: The IP or DNS entry to use for cluster discovery. + discovery_port: The port to use for cluster discovery, defaults to 2113. + username: The username to use when communicating with eventstore. + password: The password to use when communicating with eventstore. + loop:An Asyncio event loop. 
+ selector: An optional function that selects one element from a list of + :class:`photonpump.disovery.DiscoveredNode` elements. """ + if loop is not None: + warnings.warn( + "The loop parameter has been deprecated", + DeprecationWarning, + stacklevel=2, + ) discovery = get_discoverer( host, port, discovery_host, discovery_port, selector, retry_policy ) - dispatcher = MessageDispatcher(name=name, loop=loop) + dispatcher = MessageDispatcher(name=name) connector = Connector(discovery, dispatcher, name=name) credential = msg.Credential(username, password) if username and password else None diff --git a/photonpump/conversations.py b/photonpump/conversations.py index 7c546e5..78ca675 100644 --- a/photonpump/conversations.py +++ b/photonpump/conversations.py @@ -260,7 +260,6 @@ def __init__( require_master: bool = False, conversation_id: UUID = None, credential=None, - loop=None, ): super().__init__(conversation_id, credential) self._logger = logging.get_named_logger(WriteEvents) diff --git a/photonpump/messages_pb2.py b/photonpump/messages_pb2.py index 0201626..431d6f3 100644 --- a/photonpump/messages_pb2.py +++ b/photonpump/messages_pb2.py @@ -2254,7 +2254,9 @@ ], extensions=[], nested_types=[], - enum_types=[_READEVENTCOMPLETED_READEVENTRESULT,], + enum_types=[ + _READEVENTCOMPLETED_READEVENTRESULT, + ], serialized_options=None, is_extendable=False, syntax="proto2", @@ -2526,7 +2528,9 @@ ], extensions=[], nested_types=[], - enum_types=[_READSTREAMEVENTSCOMPLETED_READSTREAMRESULT,], + enum_types=[ + _READSTREAMEVENTSCOMPLETED_READSTREAMRESULT, + ], serialized_options=None, is_extendable=False, syntax="proto2", @@ -2798,7 +2802,9 @@ ], extensions=[], nested_types=[], - enum_types=[_READALLEVENTSCOMPLETED_READALLRESULT,], + enum_types=[ + _READALLEVENTSCOMPLETED_READALLRESULT, + ], serialized_options=None, is_extendable=False, syntax="proto2", @@ -3933,7 +3939,9 @@ ], extensions=[], nested_types=[], - enum_types=[_PERSISTENTSUBSCRIPTIONNAKEVENTS_NAKACTION,], + 
enum_types=[ + _PERSISTENTSUBSCRIPTIONNAKEVENTS_NAKACTION, + ], serialized_options=None, is_extendable=False, syntax="proto2", @@ -4318,7 +4326,9 @@ ], extensions=[], nested_types=[], - enum_types=[_SUBSCRIPTIONDROPPED_SUBSCRIPTIONDROPREASON,], + enum_types=[ + _SUBSCRIPTIONDROPPED_SUBSCRIPTIONDROPREASON, + ], serialized_options=None, is_extendable=False, syntax="proto2", @@ -4512,8 +4522,12 @@ ), ], extensions=[], - nested_types=[_NOTHANDLED_MASTERINFO,], - enum_types=[_NOTHANDLED_NOTHANDLEDREASON,], + nested_types=[ + _NOTHANDLED_MASTERINFO, + ], + enum_types=[ + _NOTHANDLED_NOTHANDLEDREASON, + ], serialized_options=None, is_extendable=False, syntax="proto2", @@ -4632,7 +4646,9 @@ ], extensions=[], nested_types=[], - enum_types=[_SCAVENGEDATABASECOMPLETED_SCAVENGERESULT,], + enum_types=[ + _SCAVENGEDATABASECOMPLETED_SCAVENGERESULT, + ], serialized_options=None, is_extendable=False, syntax="proto2", diff --git a/requirements.txt b/requirements.txt index 7729bf3..beca8df 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ aioresponses black +typing-extensions codecov colorama idna-ssl diff --git a/test/conftest.py b/test/conftest.py index 5602858..49355c6 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -6,7 +6,7 @@ logging.basicConfig(level=logging.DEBUG) -@pytest.yield_fixture +@pytest.fixture def event_loop(): loop = asyncio.new_event_loop() diff --git a/test/connection/test_connector.py b/test/connection/test_connector.py index 76c2c7f..bb7e077 100644 --- a/test/connection/test_connector.py +++ b/test/connection/test_connector.py @@ -49,13 +49,12 @@ async def test_when_connecting_to_a_server(event_loop): addr = NodeService("localhost", 8338, None) - async with EchoServer(addr, event_loop): + async with EchoServer(addr): dispatcher = SpyDispatcher() connector = Connector( SingleNodeDiscovery(addr, DiscoveryRetryPolicy()), dispatcher, - loop=event_loop, ) ping = Ping() @@ -87,17 +86,16 @@ async def 
test_when_a_server_disconnects(event_loop): connector = Connector( SingleNodeDiscovery(addr, DiscoveryRetryPolicy()), dispatcher, - loop=event_loop, ctrl_queue=queue, ) - raised_disconnected_event = asyncio.Future(loop=event_loop) + raised_disconnected_event = asyncio.Future() def on_disconnected(): raised_disconnected_event.set_result(True) connector.disconnected.append(on_disconnected) - async with EchoServer(addr, event_loop) as server: + async with EchoServer(addr) as server: await connector.start() connect = await queue.next_event() @@ -132,11 +130,10 @@ async def test_when_three_heartbeats_fail_in_a_row(event_loop): connector = Connector( SingleNodeDiscovery(addr, DiscoveryRetryPolicy()), dispatcher, - loop=event_loop, ctrl_queue=queue, ) - async with EchoServer(addr, event_loop): + async with EchoServer(addr): await connector.start() [connect, connected] = await queue.next_event(count=2) assert connect.command == ConnectorCommand.Connect @@ -168,11 +165,10 @@ async def test_when_a_heartbeat_succeeds(event_loop): connector = Connector( SingleNodeDiscovery(addr, DiscoveryRetryPolicy()), dispatcher, - loop=event_loop, ctrl_queue=queue, ) - async with EchoServer(addr, event_loop): + async with EchoServer(addr): await connector.start() [connect, connected] = await queue.next_event(count=2) assert connect.command == ConnectorCommand.Connect @@ -229,7 +225,7 @@ def on_stopped(exn): policy = never_retry() dispatcher = SpyDispatcher() connector = Connector( - SingleNodeDiscovery(addr, policy), dispatcher, loop=event_loop, ctrl_queue=queue + SingleNodeDiscovery(addr, policy), dispatcher, ctrl_queue=queue ) connector.stopped.append(on_stopped) @@ -251,11 +247,10 @@ async def test_when_the_connection_fails_with_an_error(event_loop): connector = Connector( SingleNodeDiscovery(addr, DiscoveryRetryPolicy()), dispatcher, - loop=event_loop, ctrl_queue=queue, ) - async with EchoServer(addr, event_loop): + async with EchoServer(addr): await connector.start() [connect, 
connected] = await queue.next_event(count=2) @@ -279,11 +274,10 @@ async def test_when_restarting_a_running_connector(event_loop): connector = Connector( SingleNodeDiscovery(addr, DiscoveryRetryPolicy()), dispatcher, - loop=event_loop, ctrl_queue=queue, ) - async with EchoServer(addr, event_loop): + async with EchoServer(addr): await connector.start() [connect, connected] = await queue.next_event(count=2) @@ -310,11 +304,10 @@ async def test_when_restarting_a_stopped_connector(event_loop): connector = Connector( SingleNodeDiscovery(addr, DiscoveryRetryPolicy()), dispatcher, - loop=event_loop, ctrl_queue=queue, ) - async with EchoServer(addr, event_loop): + async with EchoServer(addr): await connector.reconnect() [connect, connected] = await queue.next_event(count=2) diff --git a/test/connection_test.py b/test/connection_test.py index 88f5733..684b225 100644 --- a/test/connection_test.py +++ b/test/connection_test.py @@ -9,7 +9,7 @@ @pytest.mark.asyncio async def test_ping_context_mgr(event_loop): - async with connect(loop=event_loop) as conn: + async with connect() as conn: id = uuid.uuid4() await conn.ping(conversation_id=id) @@ -17,7 +17,7 @@ async def test_ping_context_mgr(event_loop): @pytest.mark.asyncio async def test_connect_subscription(event_loop): - async with connect(username="admin", password="changeit", loop=event_loop) as conn: + async with connect(username="admin", password="changeit") as conn: subscription_name = str(uuid.uuid4()) stream_name = str(uuid.uuid4()) event_id = uuid.uuid4() @@ -33,7 +33,7 @@ async def test_connect_subscription(event_loop): @pytest.mark.asyncio async def test_subscribe_to(event_loop): - async with connect(username="admin", password="changeit", loop=event_loop) as conn: + async with connect(username="admin", password="changeit") as conn: stream_name = str(uuid.uuid4()) event_id = uuid.uuid4() @@ -59,5 +59,17 @@ async def wait(self, seed): expected_policy = silly_retry_policy() - async with connect(loop=event_loop, 
retry_policy=expected_policy) as client: + async with connect(retry_policy=expected_policy) as client: assert client.connector.discovery.retry_policy == expected_policy + + +@pytest.mark.asyncio +async def test_connect_logs_deprecation_warning_when_used_with_loop_parameter( + event_loop, +): + with pytest.warns(DeprecationWarning) as record: + async with connect(loop=event_loop) as conn: + await conn.ping(conversation_id=uuid.uuid4()) + + assert len(record) == 1 + assert "The loop parameter has been deprecated" in record[0].message.args[0] diff --git a/test/conversations/test_catchup.py b/test/conversations/test_catchup.py index 3470653..d69ead8 100644 --- a/test/conversations/test_catchup.py +++ b/test/conversations/test_catchup.py @@ -801,8 +801,7 @@ async def test_live_restart(): @pytest.mark.asyncio async def test_paging_projection(): - """ - """ + """ """ convo = CatchupSubscription("my-stream") output = TeeQueue() diff --git a/test/conversations/test_catchup_all.py b/test/conversations/test_catchup_all.py index aa84c14..ff76e0f 100644 --- a/test/conversations/test_catchup_all.py +++ b/test/conversations/test_catchup_all.py @@ -240,9 +240,9 @@ async def test_end_of_stream(): @pytest.mark.asyncio async def test_paging(): """ - During the read phase, we expect to page through multiple batches of - events. In this scenario we have two batches, each of two events. - """ + During the read phase, we expect to page through multiple batches of + events. In this scenario we have two batches, each of two events. + """ convo = CatchupAllSubscription() output = TeeQueue() @@ -294,9 +294,9 @@ async def test_paging(): async def test_subscribes_at_end_of_stream(): """ - When we have read all the events in the stream, we should send a - request to subscribe for new events. - """ + When we have read all the events in the stream, we should send a + request to subscribe for new events. 
+ """ convo = CatchupAllSubscription() output = TeeQueue() @@ -321,12 +321,12 @@ async def test_subscribes_at_end_of_stream(): async def test_should_perform_a_catchup_when_subscription_is_confirmed(): """ - When we have read all the events in the stream, we should send a - request to subscribe for new events. + When we have read all the events in the stream, we should send a + request to subscribe for new events. - We should start reading catchup events from the `next_commit_position` - returned by the historical event read. - """ + We should start reading catchup events from the `next_commit_position` + returned by the historical event read. + """ convo = CatchupAllSubscription() output = TeeQueue() @@ -358,33 +358,33 @@ async def test_should_perform_a_catchup_when_subscription_is_confirmed(): async def test_should_return_catchup_events_before_subscribed_events(): """ - It's possible that the following sequence of events occurs: - * The client reads the last batch of events from a stream containing - 50 events. - * The client sends SubscribeToStream - * Event 51 is written to the stream - * The server creates a subscription starting at event 51 and - responds with SubscriptionConfirmed - * Event 52 is written to the stream - * The client receives event 52. + It's possible that the following sequence of events occurs: + * The client reads the last batch of events from a stream containing + 50 events. + * The client sends SubscribeToStream + * Event 51 is written to the stream + * The server creates a subscription starting at event 51 and + responds with SubscriptionConfirmed + * Event 52 is written to the stream + * The client receives event 52. - To solve this problem, the client needs to perform an additional read - to fetch any missing events created between the last batch and the - subscription confirmation. 
+ To solve this problem, the client needs to perform an additional read + to fetch any missing events created between the last batch and the + subscription confirmation. - -------------- + -------------- - In this scenario, we read a single event (1) from the end of the stream - and expect to create a subscription. + In this scenario, we read a single event (1) from the end of the stream + and expect to create a subscription. - We receive event 4 immediately on the subscription. We expect that the - client requests missing events. + We receive event 4 immediately on the subscription. We expect that the + client requests missing events. - We receive two pages, of one event each: 3, and 4, and then drop the subscription. + We receive two pages, of one event each: 3, and 4, and then drop the subscription. - Lastly, we expect that the events are yielded in the correct order - despite being received out of order and that we have no duplicates. - """ + Lastly, we expect that the events are yielded in the correct order + despite being received out of order and that we have no duplicates. + """ convo = CatchupAllSubscription() output = TeeQueue() @@ -532,14 +532,14 @@ async def test_subscribe_with_context_manager(): @pytest.mark.asyncio async def test_restart_from_historical(): """ - If we ask the conversation to start again while we're reading historical events - we should re-send the most recent page request. + If we ask the conversation to start again while we're reading historical events + we should re-send the most recent page request. - In this scenario, we start reading the stream at event 10, we receive a - page with 2 events, we request the next page starting at 12. + In this scenario, we start reading the stream at event 10, we receive a + page with 2 events, we request the next page starting at 12. - When we restart the conversation, we should again request the page starting at 12. 
- """ + When we restart the conversation, we should again request the page starting at 12. + """ conversation_id = uuid.uuid4() output = TeeQueue() @@ -573,24 +573,24 @@ async def test_restart_from_historical(): @pytest.mark.asyncio async def test_restart_from_catchup(): """ - If the connection drops during the catchup phase, we need to unsubscribe - from the stream and then go back to reading historical events starting from - the last page. - - => Request historical events - <= Receive 1 event, next_event = 1 - => Subscribe - <= Confirmed - => Catch up from 1 - - ** Restart ** - - => Unsubscribe - <= Confirmed - => Read historical from 1 - <= Empty page - => Subscribe - """ + If the connection drops during the catchup phase, we need to unsubscribe + from the stream and then go back to reading historical events starting from + the last page. + + => Request historical events + <= Receive 1 event, next_event = 1 + => Subscribe + <= Confirmed + => Catch up from 1 + + ** Restart ** + + => Unsubscribe + <= Confirmed + => Read historical from 1 + <= Empty page + => Subscribe + """ conversation_id = uuid.uuid4() output = TeeQueue() convo = CatchupAllSubscription(conversation_id=conversation_id) @@ -629,19 +629,19 @@ async def test_restart_from_catchup(): @pytest.mark.asyncio async def test_historical_duplicates(): """ - It's possible that we receive the reply to a ReadStreamEvents request after we've - resent the request. This will result in our receiving a duplicate page. + It's possible that we receive the reply to a ReadStreamEvents request after we've + resent the request. This will result in our receiving a duplicate page. - In this instance, we should not raise duplicate events. + In this instance, we should not raise duplicate events. 
- => Request historical - RESTART - => Request historical - <= 2 events - <= 3 events + => Request historical + RESTART + => Request historical + <= 2 events + <= 3 events - Should only see the 3 unique events - """ + Should only see the 3 unique events + """ two_events = ( ReadAllEventsResponseBuilder() @@ -681,32 +681,32 @@ async def test_historical_duplicates(): @pytest.mark.asyncio async def test_subscription_duplicates(): """ - If we receive subscription events while catching up, we buffer them internally. - If we restart the conversation at that point we need to make sure we clear our buffer - and do not raise duplicate events. - - => Request historical - <= Empty - => Subscribe to stream - <= Confirmed - => Request catchup - <= Subscribed event 2 appeared - <= Event 1, not end of stream - - RESTART - - => Drop subscription - <= Dropped - => Request historical from_event = 1 - <= Receive event 2 at end of stream - => Subscribe - <= Confirmed - => Catchup - <= Subscribed event 3 appeared - <= Empty - - Should yield [event 1, event 2, event 3] - """ + If we receive subscription events while catching up, we buffer them internally. + If we restart the conversation at that point we need to make sure we clear our buffer + and do not raise duplicate events. 
+ + => Request historical + <= Empty + => Subscribe to stream + <= Confirmed + => Request catchup + <= Subscribed event 2 appeared + <= Event 1, not end of stream + + RESTART + + => Drop subscription + <= Dropped + => Request historical from_event = 1 + <= Receive event 2 at end of stream + => Subscribe + <= Confirmed + => Catchup + <= Subscribed event 3 appeared + <= Empty + + Should yield [event 1, event 2, event 3] + """ event_1_not_end_of_stream = ( ReadAllEventsResponseBuilder() .with_event(event_number=1) @@ -753,24 +753,24 @@ async def test_subscription_duplicates(): @pytest.mark.asyncio async def test_live_restart(): """ - If we reset the conversation while we are live, we should first unsubscribe - then start a historical read from the last read event. - - => Read historial - <= empty - => subscribe - <= confirmed - => catchup - <= empty - <= event 1 appeared - <= event 2 appeared - - RESTART - - => unsubscribe - <= dropped - => Read historical from 2 - """ + If we reset the conversation while we are live, we should first unsubscribe + then start a historical read from the last read event. 
+ + => Read historial + <= empty + => subscribe + <= confirmed + => catchup + <= empty + <= event 1 appeared + <= event 2 appeared + + RESTART + + => unsubscribe + <= dropped + => Read historical from 2 + """ output = TeeQueue() convo = CatchupAllSubscription() diff --git a/test/fakes.py b/test/fakes.py index 936cedc..132a5d7 100644 --- a/test/fakes.py +++ b/test/fakes.py @@ -3,6 +3,7 @@ from photonpump.connection import Event from photonpump.conversations import Conversation +from photonpump.compat import get_running_loop class TeeQueue: @@ -55,16 +56,16 @@ def data_received(self, data): class EchoServer: - def __init__(self, addr, loop): + def __init__(self, addr): self.host = addr.address self.port = addr.port - self.loop = loop self.protocol_counter = 0 self.running = False async def __aenter__(self): self.transports = [] - server = self.loop.create_server(self.make_protocol, self.host, self.port) + loop = get_running_loop() + server = loop.create_server(self.make_protocol, self.host, self.port) self._server = await server self.running = True logging.info("Echo server is running %s", self._server) @@ -120,7 +121,9 @@ async def write_to(self, output): class FakeConnector: - def __init__(self,): + def __init__( + self, + ): self.connected = Event() self.disconnected = Event() self.stopped = Event() diff --git a/test/read_test.py b/test/read_test.py index 1c4fd8f..cbed90e 100644 --- a/test/read_test.py +++ b/test/read_test.py @@ -12,9 +12,7 @@ @pytest.mark.asyncio async def test_single_event_roundtrip(event_loop): stream_name = str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: await c.publish_event( stream_name, "thing_happened", body={"thing": 1, "happening": True} ) @@ -32,9 +30,7 @@ async def test_single_event_roundtrip(event_loop): @pytest.mark.asyncio async def test_missing_stream(event_loop): stream_name = 
str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: with pytest.raises(exceptions.StreamNotFound) as exc: await c.get_event(stream_name, 0) assert exc.value.stream == stream_name @@ -43,9 +39,7 @@ async def test_missing_stream(event_loop): @pytest.mark.asyncio async def test_read_multiple(event_loop): stream_name = str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: await given_a_stream_with_three_events(c, stream_name) result = await c.get(stream_name) @@ -62,9 +56,7 @@ async def test_read_multiple(event_loop): @pytest.mark.asyncio async def test_read_with_max_count(event_loop): stream_name = str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: await given_a_stream_with_three_events(c, stream_name) result = await c.get(stream_name, max_count=1) @@ -80,9 +72,7 @@ async def test_read_with_max_count(event_loop): @pytest.mark.asyncio async def test_read_with_max_count_and_from_event(event_loop): stream_name = str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: await given_a_stream_with_three_events(c, stream_name) result = await c.get(stream_name, max_count=1, from_event=2) @@ -99,7 +89,6 @@ async def test_read_with_max_count_and_from_event(event_loop): async def test_streaming_read(event_loop): stream_name = str(uuid.uuid4()) async with connect( - loop=event_loop, username="test-user", password="test-password", name="streaming-read", @@ -125,7 +114,6 @@ def embiggen(e): stream_name = str(uuid.uuid4()) async with connect( - 
loop=event_loop, username="test-user", password="test-password", name="comprehensions", @@ -151,9 +139,7 @@ def embiggen(e): @pytest.mark.asyncio async def test_iter_from_missing_stream(event_loop): - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: with pytest.raises(exceptions.StreamNotFound): [e async for e in c.iter("my-stream-that-isnt-a-stream")] @@ -161,7 +147,6 @@ async def test_iter_from_missing_stream(event_loop): @pytest.mark.asyncio async def test_iterall(event_loop): async with connect( - loop=event_loop, username="admin", # iter_all aggregates all streams so it needs systemwide-read perms password="changeit", name="iter_all", @@ -180,7 +165,6 @@ async def test_iterall(event_loop): @pytest.mark.asyncio async def test_readall(event_loop): async with connect( - loop=event_loop, username="admin", # get_all aggregates all streams so it needs systemwide-read perms password="changeit", name="iter_all", @@ -200,7 +184,7 @@ async def test_readall(event_loop): @pytest.mark.asyncio async def test_anonymous_access_still_works(event_loop): stream_name = str(uuid.uuid4()) - async with connect(loop=event_loop, port=11113, discovery_port=22113) as c: + async with connect(port=11113, discovery_port=22113) as c: await c.publish_event(stream_name, "first_event", body={"thing": 1}) results = await c.get(stream_name) assert results[0].event.type == "first_event" diff --git a/test/write_test.py b/test/write_test.py index e8e1cf7..bf97db0 100644 --- a/test/write_test.py +++ b/test/write_test.py @@ -10,9 +10,7 @@ @pytest.mark.asyncio async def test_single_event_publish(event_loop): stream_name = str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as conn: + async with connect(username="test-user", password="test-password") as conn: result = await conn.publish_event( stream_name, "testEvent", @@ 
-26,9 +24,7 @@ async def test_single_event_publish(event_loop): @pytest.mark.asyncio async def test_three_events_publish(event_loop): stream_name = str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: result = await c.publish( stream_name, [ @@ -53,9 +49,7 @@ async def test_three_events_publish(event_loop): @pytest.mark.asyncio async def test_a_large_event(event_loop): stream_name = str(uuid.uuid4()) - async with connect( - loop=event_loop, username="test-user", password="test-password" - ) as c: + async with connect(username="test-user", password="test-password") as c: write_result = await c.publish( stream_name, [ @@ -74,8 +68,9 @@ async def test_a_large_event(event_loop): async def test_publish_raises_exception_if_not_authenticated(event_loop): stream_name = str(uuid.uuid4()) - async with connect(loop=event_loop) as conn: + async with connect() as conn: with pytest.raises(exceptions.AccessDenied): await conn.publish( - stream_name, [messages.NewEvent("pony_jumped", data={})], + stream_name, + [messages.NewEvent("pony_jumped", data={})], ) diff --git a/tox.ini b/tox.ini index b5a00e9..b1e6d7d 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py36, py37, py38 +envlist = py36, py37, py38, py39, py310 [testenv] whitelist_externals=/usr/bin/make diff --git a/versioneer.py b/versioneer.py index 64fea1c..2b54540 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,4 +1,3 @@ - # Version: 0.18 """The Versioneer - like a rocketeer, but for versions. @@ -277,6 +276,7 @@ """ from __future__ import print_function + try: import configparser except ImportError: @@ -308,11 +308,13 @@ def get_root(): setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. 
" - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") + err = ( + "Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND')." + ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools @@ -325,8 +327,10 @@ def get_root(): me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + print( + "Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(me), versioneer_py) + ) except NameError: pass return root @@ -348,6 +352,7 @@ def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None + cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" @@ -372,17 +377,18 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -390,10 +396,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # 
remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -418,7 +427,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, return stdout, p.returncode -LONG_VERSION_PY['git'] = ''' +LONG_VERSION_PY[ + "git" +] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -993,7 +1004,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -1002,7 +1013,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -1010,19 +1021,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") @@ -1037,8 +1055,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -1046,10 +1063,19 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + "%s*" % tag_prefix, + ], + cwd=root, + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -1072,17 +1098,16 @@ def git_pieces_from_vcs(tag_prefix, root, 
verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -1091,10 +1116,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -1105,13 +1132,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ + 0 + ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -1167,16 +1194,22 @@ def 
versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print( + "Tried directories %s but none started with prefix %s" + % (str(rootdirs), parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -1205,11 +1238,13 @@ def versions_from_file(filename): contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search( + r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S + ) if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search( + r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S + ) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) @@ -1218,8 +1253,7 @@ def versions_from_file(filename): def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) + contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) @@ -1251,8 +1285,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - 
pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -1366,11 +1399,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default @@ -1390,9 +1425,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } class VersioneerBadRootError(Exception): @@ -1415,8 +1454,9 @@ def get_versions(verbose=False): handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" + assert ( + cfg.versionfile_source is not None + ), "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) @@ -1470,9 +1510,13 @@ def get_versions(verbose=False): if verbose: print("unable to compute version") - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } def get_version(): @@ -1521,6 +1565,7 
@@ def run(self): print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools @@ -1553,14 +1598,15 @@ def run(self): # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) + target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe + # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ @@ -1581,17 +1627,21 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["build_exe"] = cmd_build_exe del cmds["build_py"] - if 'py2exe' in sys.modules: # py2exe enabled? + if "py2exe" in sys.modules: # py2exe enabled? 
try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: @@ -1610,13 +1660,17 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments @@ -1643,8 +1697,10 @@ def make_release_tree(self, base_dir, files): # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) + write_to_version_file( + target_versionfile, self._versioneer_generated_versions + ) + cmds["sdist"] = cmd_sdist return cmds @@ -1699,11 +1755,13 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: + except ( + EnvironmentError, + configparser.NoSectionError, + configparser.NoOptionError, + ) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) + print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) @@ -1712,15 +1770,18 @@ def do_setup(): print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - 
"PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: @@ -1762,8 +1823,10 @@ def do_setup(): else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) + print( + " appending versionfile_source ('%s') to MANIFEST.in" + % cfg.versionfile_source + ) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: