Replace black with ruff
sjperkins committed Oct 30, 2023
1 parent 781968a commit f470e2a
Showing 12 changed files with 27 additions and 24 deletions.
12 changes: 5 additions & 7 deletions .pre-commit-config.yaml
@@ -2,16 +2,14 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v3.2.0
+    rev: v4.5.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
       - id: check-yaml
       - id: check-added-large-files
-  - repo: https://github.com/psf/black
-    rev: 22.8.0
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.1.3
     hooks:
-      - id: black
-        language_version: python3
-        args:
-          - --target-version=py38
+      - id: ruff-format
+        name: ruff format
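Note: the Python hunks in the files below are mechanical reformatting produced by the new ruff-format hook, presumably where its style differs from the previously pinned black 22.8.0: the blank line directly after a def or for header is removed, and redundant parentheses around tuple targets in for loops are stripped. A hypothetical before/after sketch of the two patterns (not code from the repository):

```python
# Hypothetical example illustrating the two patterns rewritten in the hunks below.

items = [(True, "a", 1), (False, "b", 2)]


# Before: blank line after the signature, parenthesised tuple target.
def describe_before(pairs):

    for (flag, key, value) in pairs:
        print(flag, key, value)


# After ruff format: blank line removed, parentheses dropped.
def describe_after(pairs):
    for flag, key, value in pairs:
        print(flag, key, value)


describe_before(items)
describe_after(items)
```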
1 change: 0 additions & 1 deletion daskms/apps/fragments.py
@@ -21,7 +21,6 @@ def fragments():
     default=False,
 )
 def stat(fragment_path, prune):
-
     ancestors = get_ancestry(fragment_path, only_required=prune)

     click.echo("Ancestry:")
1 change: 1 addition & 0 deletions daskms/conftest.py
@@ -13,6 +13,7 @@

 from daskms.testing import mark_in_pytest

+
 # content of conftest.py
 def pytest_configure(config):
     mark_in_pytest(True)
2 changes: 1 addition & 1 deletion daskms/dataset.py
@@ -491,7 +491,7 @@ def finalize_compute(results, info, coords, attrs):
     data_vars = OrderedDict()
     rev_results = list(results[::-1])

-    for (dask_collection, k, v) in info:
+    for dask_collection, k, v in info:
         if dask_collection:
             fn, args = v
             r = rev_results.pop()
3 changes: 0 additions & 3 deletions daskms/experimental/arrow/reads.py
@@ -165,7 +165,6 @@ def partition_chunking(partition, fragment_rows, chunks):
     it = zip(chunk_intervals, chunk_intervals[1:])

     for c, (lower, upper) in enumerate(it):
-
         si = np.searchsorted(intervals, lower, side="right") - 1
         ei = np.searchsorted(intervals, upper, side="left")

@@ -191,7 +190,6 @@ def partition_chunking(partition, fragment_rows, chunks):


 def fragment_reader(fragments, ranges, column, shape, dtype):
-
     if len(fragments) > 1:  # Reading over multiple row_groups.
         arr = np.empty(shape, dtype=dtype)
         offset = 0
@@ -277,7 +275,6 @@ def xds_from_parquet(store, columns=None, chunks=None, **kwargs):
             partition_chunks = partition_chunking(p, fragment_rows, chunks)

             for pieces in partition_chunks.values():
-
                 chunk_fragments = [fragments[i] for i, _ in pieces]
                 chunk_ranges = [r for _, r in pieces]
                 chunk_metas = [f.metadata for f in chunk_fragments]
3 changes: 0 additions & 3 deletions daskms/experimental/arrow/tests/test_parquet.py
@@ -166,7 +166,6 @@ def test_xds_to_parquet_local(ms, tmp_path_factory, spw_table, ant_table):
 def test_xds_to_parquet_s3(
     ms, spw_table, ant_table, py_minio_client, minio_user_key, minio_url, s3_bucket_name
 ):
-
     py_minio_client.make_bucket(s3_bucket_name)

     store = DaskMSStore(
@@ -189,7 +188,6 @@ def test_xds_to_parquet_s3(

 @pytest.fixture(params=[1, 2, 3, 4])
 def parquet_ms(ms, tmp_path_factory, request):
-
     parquet_store = tmp_path_factory.mktemp("parquet") / "test.parquet"

     # Chunk in row so we can probe chunk behaviour on reads.
@@ -204,7 +202,6 @@ def parquet_ms(ms, tmp_path_factory, request):

 @pytest.mark.parametrize("rc", [1, 2, 3, 4])
 def test_xds_from_parquet_chunks(ms, parquet_ms, rc):
-
     xdsl = xds_from_parquet(parquet_ms, chunks={"row": rc})

     chunks = chain.from_iterable([xds.chunks["row"] for xds in xdsl])
1 change: 0 additions & 1 deletion daskms/experimental/arrow/writes.py
@@ -60,7 +60,6 @@ def __reduce__(self):
         return (ParquetFragment, (self.store, self.key, self.schema, self.dataset_id))

     def write(self, chunk, *data):
-
         table_path = (
             self.key if self.store.table else self.store.join(["MAIN", self.key])
         )
1 change: 0 additions & 1 deletion daskms/experimental/zarr/__init__.py
@@ -150,7 +150,6 @@ def prepare_zarr_group(dataset_id, dataset, store, rechunk=False):


 def get_group_chunks(group):
-
     group_chunks = {}

     for array in group.values():
2 changes: 0 additions & 2 deletions daskms/experimental/zarr/tests/test_zarr.py
@@ -113,7 +113,6 @@ def test_metadata_consolidation(ms, ant_table, tmp_path_factory, consolidated):


 def zarr_tester(ms, spw_table, ant_table, zarr_store, spw_store, ant_store):
-
     ms_datasets = xds_from_ms(ms)
     spw_datasets = xds_from_table(spw_table, group_cols="__row__")
     ant_datasets = xds_from_table(ant_table)
@@ -292,7 +291,6 @@ def test_fasteners(ms, tmp_path_factory):


 def test_basic_roundtrip(tmp_path):
-
     path = tmp_path / "test.zarr"

     # We need >10 datasets to be sure roundtripping is consistent.
3 changes: 0 additions & 3 deletions daskms/tests/test_storage.py
@@ -12,7 +12,6 @@

 @pytest.mark.skipif(xarray is None, reason="Need xarray to check equality.")
 def test_storage_ms(ms):
-
     oxdsl = xds_from_ms(ms)

     writes = xds_to_storage_table(oxdsl, ms)
@@ -28,7 +27,6 @@ def test_storage_ms(ms):

 @pytest.mark.skipif(xarray is None, reason="Need xarray to check equality.")
 def test_storage_zarr(ms, tmp_path_factory):
-
     zarr_store = tmp_path_factory.mktemp("zarr") / "test.zarr"

     oxdsl = xds_from_ms(ms)
@@ -52,7 +50,6 @@ def test_storage_zarr(ms, tmp_path_factory):

 @pytest.mark.skipif(xarray is None, reason="Need xarray to check equality.")
 def test_storage_parquet(ms, tmp_path_factory):
-
     parquet_store = tmp_path_factory.mktemp("parquet") / "test.parquet"

     oxdsl = xds_from_ms(ms)
2 changes: 0 additions & 2 deletions daskms/writes.py
@@ -522,7 +522,6 @@ def cached_row_order(rowid):
         if not layer_name.startswith("row-") and not layer_name.startswith(
             "group-rows-"
         ):
-
             log.warning(
                 "Unusual ROWID layer %s. "
                 "This is probably OK but "
@@ -539,7 +538,6 @@ def cached_row_order(rowid):
             layer_names[0].startswith("group-rows-")
             and layer_names[1].startswith("rechunk-merge-")
         ):
-
             log.warning(
                 "Unusual ROWID layers %s for "
                 "the group ordering case. "
20 changes: 20 additions & 0 deletions pyproject.toml
@@ -37,6 +37,9 @@ testing = ["minio", "pytest"]
 tbump = "^6.9.0"
 pre-commit = "^2.20.0"
 black = "^22.8.0"
+ipython = "^8.16.1"
+ipdb = "^0.13.13"
+ruff = "^0.1.3"

 [tool.poetry.group.docs.dependencies]
 furo = "^2022.9.15"
@@ -45,6 +48,23 @@ numpydoc = "^1.4.0"
 Pygments = "^2.13.0"
 sphinx-copybutton = "^0.5.0"

+
+[tool.ruff]
+line-length = 88
+target-version = "py310"
+
+select = [
+    # flake8-builtins
+    "A",
+    # flake8-bugbear
+    "B",
+    # isort
+    "I001",
+    "I002",
+    # tidy imports
+    "TID"
+]
+
 [build-system]
 requires = ["setuptools", "poetry-core"]
 build-backend = "poetry.core.masonry.api"
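Note: the selected rule families enable linting beyond formatting. "A" (flake8-builtins) flags names that shadow Python builtins, "B" (flake8-bugbear) flags likely bugs such as mutable default arguments, "I001"/"I002" (isort) cover unsorted import blocks and missing required imports, and "TID" (flake8-tidy-imports) polices import style. A hypothetical snippet, not from the dask-ms codebase, of the kind of code `ruff check` would flag under this configuration:

```python
import sys
import os  # I001: this import block is not sorted

from ..config import defaults  # TID252: relative import from a parent module (only meaningful inside a package)


def append_row(row, rows=[]):  # B006: mutable default argument
    rows.append(row)
    return rows


def count(list):  # A002: argument name shadows the `list` builtin
    return len(list)
```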
