Skip to content

Commit

Permalink
Enable all pyupgrade checks in ruff
Browse files Browse the repository at this point in the history
This will help keep us from using deprecated Python features going forward.

Signed-off-by: Tristan Partin <[email protected]>
  • Loading branch information
tristan957 authored Oct 8, 2024
1 parent 618680c commit 5bd8e23
Show file tree
Hide file tree
Showing 203 changed files with 1,075 additions and 564 deletions.
7 changes: 4 additions & 3 deletions pre-commit.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
#!/usr/bin/env python3

from __future__ import annotations

import argparse
import enum
import os
import subprocess
import sys
from typing import List


@enum.unique
Expand Down Expand Up @@ -55,12 +56,12 @@ def mypy() -> str:
return "poetry run mypy"


def get_commit_files() -> List[str]:
def get_commit_files() -> list[str]:
    """Return the paths of files staged for commit (added, copied, or modified)."""
    diff_cmd = ["git", "diff", "--cached", "--name-only", "--diff-filter=ACM"]
    raw_output = subprocess.check_output(diff_cmd)
    return raw_output.decode().splitlines()


def check(name: str, suffix: str, cmd: str, changed_files: List[str], no_color: bool = False):
def check(name: str, suffix: str, cmd: str, changed_files: list[str], no_color: bool = False):
print(f"Checking: {name} ", end="")
applicable_files = list(filter(lambda fname: fname.strip().endswith(suffix), changed_files))
if not applicable_files:
Expand Down
5 changes: 4 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -97,5 +97,8 @@ select = [
"I", # isort
"W", # pycodestyle
"B", # bugbear
"UP032", # f-string
"UP", # pyupgrade
]

[tool.ruff.lint.pyupgrade]
keep-runtime-typing = true # Remove this stanza when we require Python 3.10
5 changes: 3 additions & 2 deletions scripts/benchmark_durations.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
#! /usr/bin/env python3

from __future__ import annotations

import argparse
import json
import logging
from typing import Dict

import psycopg2
import psycopg2.extras
Expand Down Expand Up @@ -110,7 +111,7 @@ def main(args: argparse.Namespace):
output = args.output
percentile = args.percentile

res: Dict[str, float] = {}
res: dict[str, float] = {}

try:
logging.info("connecting to the database...")
Expand Down
3 changes: 3 additions & 0 deletions scripts/download_basebackup.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@
#
# This can be useful in disaster recovery.
#

from __future__ import annotations

import argparse

import psycopg2
Expand Down
11 changes: 8 additions & 3 deletions scripts/flaky_tests.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,21 @@
#! /usr/bin/env python3

from __future__ import annotations

import argparse
import json
import logging
import os
from collections import defaultdict
from typing import Any, DefaultDict, Dict, Optional
from typing import TYPE_CHECKING

import psycopg2
import psycopg2.extras
import toml

if TYPE_CHECKING:
from typing import Any, Optional

FLAKY_TESTS_QUERY = """
SELECT
DISTINCT parent_suite, suite, name
Expand All @@ -33,7 +38,7 @@ def main(args: argparse.Namespace):
build_type = args.build_type
pg_version = args.pg_version

res: DefaultDict[str, DefaultDict[str, Dict[str, bool]]]
res: defaultdict[str, defaultdict[str, dict[str, bool]]]
res = defaultdict(lambda: defaultdict(dict))

try:
Expand All @@ -60,7 +65,7 @@ def main(args: argparse.Namespace):
pageserver_virtual_file_io_engine_parameter = ""

# re-use existing records of flaky tests from before parametrization by compaction_algorithm
def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[Dict[str, Any]]:
def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict[str, Any]]:
"""Duplicated from parametrize.py"""
toml_table = os.getenv("PAGESERVER_DEFAULT_TENANT_CONFIG_COMPACTION_ALGORITHM")
if toml_table is None:
Expand Down
16 changes: 11 additions & 5 deletions scripts/force_layer_download.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,21 @@
from __future__ import annotations

import argparse
import asyncio
import json
import logging
import signal
import sys
from collections import defaultdict
from collections.abc import Awaitable
from dataclasses import dataclass
from typing import Any, Awaitable, Dict, List, Tuple
from typing import TYPE_CHECKING

import aiohttp

if TYPE_CHECKING:
from typing import Any


class ClientException(Exception):
pass
Expand Down Expand Up @@ -89,7 +95,7 @@ async def timeline_poll_download_remote_layers_status(
class Completed:
"""The status dict returned by the API"""

status: Dict[str, Any]
status: dict[str, Any]


sigint_received = asyncio.Event()
Expand Down Expand Up @@ -179,7 +185,7 @@ async def main_impl(args, report_out, client: Client):
"""
Returns OS exit status.
"""
tenant_and_timline_ids: List[Tuple[str, str]] = []
tenant_and_timline_ids: list[tuple[str, str]] = []
# fill tenant_and_timline_ids based on spec
for spec in args.what:
comps = spec.split(":")
Expand Down Expand Up @@ -215,14 +221,14 @@ async def main_impl(args, report_out, client: Client):
tenant_and_timline_ids = tmp

logging.info("create tasks and process them at specified concurrency")
task_q: asyncio.Queue[Tuple[str, Awaitable[Any]]] = asyncio.Queue()
task_q: asyncio.Queue[tuple[str, Awaitable[Any]]] = asyncio.Queue()
tasks = {
f"{tid}:{tlid}": do_timeline(client, tid, tlid) for tid, tlid in tenant_and_timline_ids
}
for task in tasks.items():
task_q.put_nowait(task)

result_q: asyncio.Queue[Tuple[str, Any]] = asyncio.Queue()
result_q: asyncio.Queue[tuple[str, Any]] = asyncio.Queue()
taskq_handlers = []
for _ in range(0, args.concurrent_tasks):
taskq_handlers.append(taskq_handler(task_q, result_q))
Expand Down
3 changes: 3 additions & 0 deletions scripts/ingest_perf_test_result.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
#!/usr/bin/env python3

from __future__ import annotations

import argparse
import json
import logging
Expand Down
5 changes: 3 additions & 2 deletions scripts/ingest_regress_test_result-new-format.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
#! /usr/bin/env python3

from __future__ import annotations

import argparse
import dataclasses
import json
Expand All @@ -11,7 +13,6 @@
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Tuple

import backoff
import psycopg2
Expand Down Expand Up @@ -91,7 +92,7 @@ def create_table(cur):
cur.execute(CREATE_TABLE)


def parse_test_name(test_name: str) -> Tuple[str, int, str]:
def parse_test_name(test_name: str) -> tuple[str, int, str]:
build_type, pg_version = None, None
if match := TEST_NAME_RE.search(test_name):
found = match.groupdict()
Expand Down
2 changes: 2 additions & 0 deletions scripts/sk_cleanup_tenants/script.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from __future__ import annotations

import argparse
import logging
import os
Expand Down
2 changes: 2 additions & 0 deletions test_runner/cloud_regress/test_cloud_regress.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
Run the regression tests on the cloud instance of Neon
"""

from __future__ import annotations

from pathlib import Path
from typing import Any

Expand Down
2 changes: 2 additions & 0 deletions test_runner/conftest.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from __future__ import annotations

pytest_plugins = (
"fixtures.pg_version",
"fixtures.parametrize",
Expand Down
1 change: 1 addition & 0 deletions test_runner/fixtures/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from __future__ import annotations
41 changes: 24 additions & 17 deletions test_runner/fixtures/benchmark_fixture.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,17 @@
from __future__ import annotations

import calendar
import dataclasses
import enum
import json
import os
import re
import timeit
from collections.abc import Iterator
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path

# Type-related stuff
from typing import Callable, ClassVar, Dict, Iterator, Optional
from typing import TYPE_CHECKING

import allure
import pytest
Expand All @@ -23,6 +24,10 @@
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonPageserver

if TYPE_CHECKING:
from typing import Callable, ClassVar, Optional


"""
This file contains fixtures for micro-benchmarks.
Expand Down Expand Up @@ -138,18 +143,6 @@ def parse_from_stdout(

@dataclasses.dataclass
class PgBenchInitResult:
# Taken from https://github.com/postgres/postgres/blob/REL_15_1/src/bin/pgbench/pgbench.c#L5144-L5171
EXTRACTORS: ClassVar[Dict[str, re.Pattern]] = { # type: ignore[type-arg]
"drop_tables": re.compile(r"drop tables (\d+\.\d+) s"),
"create_tables": re.compile(r"create tables (\d+\.\d+) s"),
"client_side_generate": re.compile(r"client-side generate (\d+\.\d+) s"),
"server_side_generate": re.compile(r"server-side generate (\d+\.\d+) s"),
"vacuum": re.compile(r"vacuum (\d+\.\d+) s"),
"primary_keys": re.compile(r"primary keys (\d+\.\d+) s"),
"foreign_keys": re.compile(r"foreign keys (\d+\.\d+) s"),
"total": re.compile(r"done in (\d+\.\d+) s"), # Total time printed by pgbench
}

total: Optional[float]
drop_tables: Optional[float]
create_tables: Optional[float]
Expand All @@ -162,6 +155,20 @@ class PgBenchInitResult:
start_timestamp: int
end_timestamp: int

# Taken from https://github.com/postgres/postgres/blob/REL_15_1/src/bin/pgbench/pgbench.c#L5144-L5171
EXTRACTORS: ClassVar[dict[str, re.Pattern[str]]] = dataclasses.field(
default_factory=lambda: {
"drop_tables": re.compile(r"drop tables (\d+\.\d+) s"),
"create_tables": re.compile(r"create tables (\d+\.\d+) s"),
"client_side_generate": re.compile(r"client-side generate (\d+\.\d+) s"),
"server_side_generate": re.compile(r"server-side generate (\d+\.\d+) s"),
"vacuum": re.compile(r"vacuum (\d+\.\d+) s"),
"primary_keys": re.compile(r"primary keys (\d+\.\d+) s"),
"foreign_keys": re.compile(r"foreign keys (\d+\.\d+) s"),
"total": re.compile(r"done in (\d+\.\d+) s"), # Total time printed by pgbench
}
)

@classmethod
def parse_from_stderr(
cls,
Expand All @@ -175,7 +182,7 @@ def parse_from_stderr(

last_line = stderr.splitlines()[-1]

timings: Dict[str, Optional[float]] = {}
timings: dict[str, Optional[float]] = {}
last_line_items = re.split(r"\(|\)|,", last_line)
for item in last_line_items:
for key, regex in cls.EXTRACTORS.items():
Expand Down Expand Up @@ -385,7 +392,7 @@ def get_int_counter_value(
self,
pageserver: NeonPageserver,
metric_name: str,
label_filters: Optional[Dict[str, str]] = None,
label_filters: Optional[dict[str, str]] = None,
) -> int:
"""Fetch the value of given int counter from pageserver metrics."""
all_metrics = pageserver.http_client().get_metrics()
Expand Down
20 changes: 13 additions & 7 deletions test_runner/fixtures/common_types.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,16 @@
from __future__ import annotations

import random
from dataclasses import dataclass
from enum import Enum
from functools import total_ordering
from typing import Any, Dict, Type, TypeVar, Union
from typing import TYPE_CHECKING, TypeVar

if TYPE_CHECKING:
from typing import Any, Union

T = TypeVar("T", bound="Id")

T = TypeVar("T", bound="Id")

DEFAULT_WAL_SEG_SIZE = 16 * 1024 * 1024

Expand Down Expand Up @@ -56,7 +62,7 @@ def __sub__(self, other: Any) -> int:
return NotImplemented
return self.lsn_int - other.lsn_int

def __add__(self, other: Union[int, "Lsn"]) -> "Lsn":
def __add__(self, other: Union[int, Lsn]) -> Lsn:
if isinstance(other, int):
return Lsn(self.lsn_int + other)
elif isinstance(other, Lsn):
Expand All @@ -70,7 +76,7 @@ def __hash__(self) -> int:
def as_int(self) -> int:
return self.lsn_int

def segment_lsn(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> "Lsn":
def segment_lsn(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> Lsn:
return Lsn(self.lsn_int - (self.lsn_int % seg_sz))

def segno(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> int:
Expand Down Expand Up @@ -127,7 +133,7 @@ def __hash__(self) -> int:
return hash(str(self.id))

@classmethod
def generate(cls: Type[T]) -> T:
def generate(cls: type[T]) -> T:
    """Generate a random ID: 16 random bytes rendered as a 32-char hex string."""
    hex_id = random.randbytes(16).hex()
    return cls(hex_id)

Expand Down Expand Up @@ -162,7 +168,7 @@ class TenantTimelineId:
timeline_id: TimelineId

@classmethod
def from_json(cls, d: Dict[str, Any]) -> "TenantTimelineId":
def from_json(cls, d: dict[str, Any]) -> TenantTimelineId:
return TenantTimelineId(
tenant_id=TenantId(d["tenant_id"]),
timeline_id=TimelineId(d["timeline_id"]),
Expand All @@ -181,7 +187,7 @@ def __init__(self, tenant_id: TenantId, shard_number: int, shard_count: int):
assert self.shard_number < self.shard_count or self.shard_count == 0

@classmethod
def parse(cls: Type[TTenantShardId], input) -> TTenantShardId:
def parse(cls: type[TTenantShardId], input) -> TTenantShardId:
if len(input) == 32:
return cls(
tenant_id=TenantId(input),
Expand Down
Loading

1 comment on commit 5bd8e23

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

5085 tests run: 4878 passed, 0 failed, 207 skipped (full report)


Flaky tests (1)

Postgres 14

Code coverage* (full report)

  • functions: 31.4% (7505 of 23939 functions)
  • lines: 49.6% (60281 of 121651 lines)

* collected from Rust tests only


The comment gets automatically updated with the latest test results
5bd8e23 at 2024-10-08T20:21:31.253Z :recycle:

Please sign in to comment.