diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 968bdc1f0..b553749ad 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,6 +24,22 @@ on: branches: ["branch-*"] jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install Python + uses: actions/setup-python@v4 + with: + python-version: "3.11" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install ruff + # Update output format to enable automatic inline annotations. + - name: Run Ruff + run: ruff check --output-format=github datafusion + generate-license: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 371f07979..291120bf8 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -103,13 +103,6 @@ jobs: source venv/bin/activate pip install -r requirements-311.txt - - name: Run Python Linters - if: ${{ matrix.python-version == '3.10' && matrix.toolchain == 'stable' }} - run: | - source venv/bin/activate - flake8 --exclude venv,benchmarks/db-benchmark --ignore=E501,W503 - black --line-length 79 --diff --check . - - name: Run tests env: RUST_BACKTRACE: 1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39049bf49..8509fae2c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,19 +20,14 @@ repos: rev: v1.6.23 hooks: - id: actionlint-docker - - repo: https://github.com/psf/black - rev: 22.3.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.3.0 hooks: - - id: black - files: datafusion/.* - - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 - hooks: - - id: flake8 - files: datafusion/.*$ - types: [file] - types_or: [python] - additional_dependencies: ["flake8-force"] + # Run the linter. + - id: ruff + # Run the formatter. + - id: ruff-format - repo: local hooks: - id: rust-fmt diff --git a/README.md b/README.md index a682f73d5..a2e3efd4e 100644 --- a/README.md +++ b/README.md @@ -202,7 +202,7 @@ source venv/bin/activate # update pip itself if necessary python -m pip install -U pip # install dependencies (for Python 3.8+) -python -m pip install -r requirements-310.txt +python -m pip install -r requirements.in ``` The tests rely on test data in git submodules. @@ -222,12 +222,27 @@ python -m pytest ### Running & Installing pre-commit hooks -arrow-datafusion-python takes advantage of [pre-commit](https://pre-commit.com/) to assist developers with code linting to help reduce the number of commits that ultimately fail in CI due to linter errors. Using the pre-commit hooks is optional for the developer but certainly helpful for keeping PRs clean and concise. +arrow-datafusion-python takes advantage of [pre-commit](https://pre-commit.com/) to assist developers with code linting to help reduce +the number of commits that ultimately fail in CI due to linter errors. Using the pre-commit hooks is optional for the +developer but certainly helpful for keeping PRs clean and concise. -Our pre-commit hooks can be installed by running `pre-commit install`, which will install the configurations in your ARROW_DATAFUSION_PYTHON_ROOT/.github directory and run each time you perform a commit, failing to complete the commit if an offending lint is found allowing you to make changes locally before pushing.
+Our pre-commit hooks can be installed by running `pre-commit install`, which will install the configurations in +your ARROW_DATAFUSION_PYTHON_ROOT/.git directory and run each time you perform a commit, failing to complete +the commit if an offending lint is found, allowing you to make changes locally before pushing. The pre-commit hooks can also be run ad hoc without installing them by simply running `pre-commit run --all-files` +## Running linters without using pre-commit + +There are scripts in `ci/scripts` for running Rust and Python linters. + +```shell +./ci/scripts/python_lint.sh +./ci/scripts/rust_clippy.sh +./ci/scripts/rust_fmt.sh +./ci/scripts/rust_toml_fmt.sh +``` + ## How to update dependencies To change test dependencies, change the `requirements.in` and run diff --git a/benchmarks/db-benchmark/groupby-datafusion.py b/benchmarks/db-benchmark/groupby-datafusion.py index 2c35259e8..3a4399f7d 100644 --- a/benchmarks/db-benchmark/groupby-datafusion.py +++ b/benchmarks/db-benchmark/groupby-datafusion.py @@ -79,17 +79,13 @@ def execute(df): data = pacsv.read_csv( src_grp, - convert_options=pacsv.ConvertOptions( - auto_dict_encode=True, column_types=schema - ), + convert_options=pacsv.ConvertOptions(auto_dict_encode=True, column_types=schema), ) print("dataset loaded") # create a session context with explicit runtime and config settings runtime = ( - RuntimeConfig() - .with_disk_manager_os() - .with_fair_spill_pool(64 * 1024 * 1024 * 1024) + RuntimeConfig().with_disk_manager_os().with_fair_spill_pool(64 * 1024 * 1024 * 1024) ) config = ( SessionConfig() @@ -116,9 +112,7 @@ def execute(df): if sql: df = ctx.sql("SELECT id1, SUM(v1) AS v1 FROM x GROUP BY id1") else: - df = ctx.table("x").aggregate( - [f.col("id1")], [f.sum(f.col("v1")).alias("v1")] - ) + df = ctx.table("x").aggregate([f.col("id1")], [f.sum(f.col("v1")).alias("v1")]) ans = execute(df) shape = ans_shape(ans) @@ -197,9 +191,7 @@ def execute(df): gc.collect() t_start = timeit.default_timer() if sql: - df = ctx.sql( - "SELECT id3, SUM(v1) AS v1, AVG(v3) AS v3 FROM x GROUP BY id3" - ) + df = ctx.sql("SELECT id3, SUM(v1) AS v1, AVG(v3) AS v3 FROM x GROUP BY id3") else: df = ctx.table("x").aggregate( [f.col("id3")], diff --git a/benchmarks/db-benchmark/join-datafusion.py b/benchmarks/db-benchmark/join-datafusion.py index 602cee697..4d59c7dc2 100755 --- a/benchmarks/db-benchmark/join-datafusion.py +++ b/benchmarks/db-benchmark/join-datafusion.py @@ -152,11 +152,7 @@ def ans_shape(batches): print(f"q2: {t}") t_start = timeit.default_timer() df = ctx.create_dataframe([ans]) -chk = ( - df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]) - .collect()[0] - .column(0)[0] -) +chk = df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]).collect()[0].column(0)[0] chkt = timeit.default_timer() - t_start m = memory_usage() write_log( @@ -193,11 +189,7 @@ def ans_shape(batches): print(f"q3: {t}") t_start = timeit.default_timer() df = ctx.create_dataframe([ans]) -chk = ( - df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]) - .collect()[0] - .column(0)[0] -) +chk = df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]).collect()[0].column(0)[0] chkt = timeit.default_timer() - t_start m = memory_usage() write_log( @@ -234,11 +226,7 @@ def ans_shape(batches): print(f"q4: {t}") t_start = timeit.default_timer() df = ctx.create_dataframe([ans]) -chk = ( - df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]) - .collect()[0] - .column(0)[0] -) +chk = df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]).collect()[0].column(0)[0] chkt =
timeit.default_timer() - t_start m = memory_usage() write_log( @@ -275,11 +263,7 @@ def ans_shape(batches): print(f"q5: {t}") t_start = timeit.default_timer() df = ctx.create_dataframe([ans]) -chk = ( - df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]) - .collect()[0] - .column(0)[0] -) +chk = df.aggregate([], [f.sum(col("v1")), f.sum(col("v2"))]).collect()[0].column(0)[0] chkt = timeit.default_timer() - t_start m = memory_usage() write_log( diff --git a/benchmarks/tpch/tpch.py b/benchmarks/tpch/tpch.py index ea830a1ff..7f104a4cb 100644 --- a/benchmarks/tpch/tpch.py +++ b/benchmarks/tpch/tpch.py @@ -83,9 +83,7 @@ def bench(data_path, query_path): time_millis = (end - start) * 1000 total_time_millis += time_millis print("q{},{}".format(query, round(time_millis, 1))) - results.write( - "q{},{}\n".format(query, round(time_millis, 1)) - ) + results.write("q{},{}\n".format(query, round(time_millis, 1))) results.flush() except Exception as e: print("query", query, "failed", e) diff --git a/ci/scripts/python_lint.sh b/ci/scripts/python_lint.sh new file mode 100755 index 000000000..3f7310ba7 --- /dev/null +++ b/ci/scripts/python_lint.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +set -ex +ruff format datafusion +ruff check datafusion \ No newline at end of file diff --git a/datafusion/__init__.py b/datafusion/__init__.py index df53b396a..cfb673613 100644 --- a/datafusion/__init__.py +++ b/datafusion/__init__.py @@ -208,9 +208,7 @@ def udaf(accum, input_type, return_type, state_type, volatility, name=None): Create a new User Defined Aggregate Function """ if not issubclass(accum, Accumulator): - raise TypeError( - "`accum` must implement the abstract base class Accumulator" - ) + raise TypeError("`accum` must implement the abstract base class Accumulator") if name is None: name = accum.__qualname__.lower() if isinstance(input_type, pa.lib.DataType): diff --git a/datafusion/cudf.py b/datafusion/cudf.py index e39daea31..d8bc8e6d0 100644 --- a/datafusion/cudf.py +++ b/datafusion/cudf.py @@ -68,9 +68,7 @@ def to_cudf_df(self, plan): elif isinstance(node, TableScan): return cudf.read_parquet(self.parquet_tables[node.table_name()]) else: - raise Exception( - "unsupported logical operator: {}".format(type(node)) - ) + raise Exception("unsupported logical operator: {}".format(type(node))) def create_schema(self, schema_name: str, **kwargs): logger.debug(f"Creating schema: {schema_name}") diff --git a/datafusion/input/base.py b/datafusion/input/base.py index fb1207896..efcaf7697 100644 --- a/datafusion/input/base.py +++ b/datafusion/input/base.py @@ -31,13 +31,9 @@ class BaseInputSource(ABC): """ @abstractmethod - def is_correct_input( - self, input_item: Any, table_name: str, **kwargs - ) -> bool: + def is_correct_input(self, input_item: Any, table_name: str, **kwargs) -> bool: pass @abstractmethod - def build_table( - self, input_item: Any, table_name: str, **kwarg - ) -> SqlTable: + def build_table(self, input_item: Any, table_name: str, **kwarg) -> SqlTable: pass diff --git a/datafusion/input/location.py b/datafusion/input/location.py index 939c7f415..16e632d1b 100644 --- a/datafusion/input/location.py +++ b/datafusion/input/location.py @@ -72,9 +72,7 @@ def build_table( for _ in reader: num_rows += 1 # TODO: Need to actually consume this row into reasonable columns - raise RuntimeError( - "TODO: Currently unable to support CSV input files."
- ) + raise RuntimeError("TODO: Currently unable to support CSV input files.") else: raise RuntimeError( f"Input of format: `{format}` is currently not supported.\ diff --git a/datafusion/pandas.py b/datafusion/pandas.py index c2da83ff6..cf1fa6f7b 100644 --- a/datafusion/pandas.py +++ b/datafusion/pandas.py @@ -64,9 +64,7 @@ def to_pandas_df(self, plan): elif isinstance(node, TableScan): return pd.read_parquet(self.parquet_tables[node.table_name()]) else: - raise Exception( - "unsupported logical operator: {}".format(type(node)) - ) + raise Exception("unsupported logical operator: {}".format(type(node))) def create_schema(self, schema_name: str, **kwargs): logger.debug(f"Creating schema: {schema_name}") diff --git a/datafusion/polars.py b/datafusion/polars.py index e4eb966fc..ac5e26e3e 100644 --- a/datafusion/polars.py +++ b/datafusion/polars.py @@ -51,9 +51,7 @@ def to_polars_df(self, plan): args = [self.to_polars_expr(expr) for expr in node.projections()] return inputs[0].select(*args) elif isinstance(node, Aggregate): - groupby_expr = [ - self.to_polars_expr(expr) for expr in node.group_by_exprs() - ] + groupby_expr = [self.to_polars_expr(expr) for expr in node.group_by_exprs()] aggs = [] for expr in node.aggregate_exprs(): expr = expr.to_variant() @@ -67,17 +65,13 @@ def to_polars_df(self, plan): ) ) else: - raise Exception( - "Unsupported aggregate function {}".format(expr) - ) + raise Exception("Unsupported aggregate function {}".format(expr)) df = inputs[0].groupby(groupby_expr).agg(aggs) return df elif isinstance(node, TableScan): return polars.read_parquet(self.parquet_tables[node.table_name()]) else: - raise Exception( - "unsupported logical operator: {}".format(type(node)) - ) + raise Exception("unsupported logical operator: {}".format(type(node))) def create_schema(self, schema_name: str, **kwargs): logger.debug(f"Creating schema: {schema_name}") diff --git a/datafusion/tests/generic.py b/datafusion/tests/generic.py index 1f984a40a..07399792c 100644 --- a/datafusion/tests/generic.py +++ b/datafusion/tests/generic.py @@ -50,9 +50,7 @@ def data_datetime(f): datetime.datetime.now() - datetime.timedelta(days=1), datetime.datetime.now() + datetime.timedelta(days=1), ] - return pa.array( - data, type=pa.timestamp(f), mask=np.array([False, True, False]) - ) + return pa.array(data, type=pa.timestamp(f), mask=np.array([False, True, False])) def data_date32(): @@ -61,9 +59,7 @@ def data_date32(): datetime.date(1980, 1, 1), datetime.date(2030, 1, 1), ] - return pa.array( - data, type=pa.date32(), mask=np.array([False, True, False]) - ) + return pa.array(data, type=pa.date32(), mask=np.array([False, True, False])) def data_timedelta(f): @@ -72,9 +68,7 @@ def data_timedelta(f): datetime.timedelta(days=1), datetime.timedelta(seconds=1), ] - return pa.array( - data, type=pa.duration(f), mask=np.array([False, True, False]) - ) + return pa.array(data, type=pa.duration(f), mask=np.array([False, True, False])) def data_binary_other(): diff --git a/datafusion/tests/test_aggregation.py b/datafusion/tests/test_aggregation.py index 0a6c90c32..99a470b6b 100644 --- a/datafusion/tests/test_aggregation.py +++ b/datafusion/tests/test_aggregation.py @@ -81,9 +81,7 @@ def test_built_in_aggregation(df): assert result.column(2) == pa.array([4]) assert result.column(3) == pa.array([6]) assert result.column(4) == pa.array([[4, 4, 6]]) - np.testing.assert_array_almost_equal( - result.column(5), np.average(values_a) - ) + np.testing.assert_array_almost_equal(result.column(5), np.average(values_a)) 
np.testing.assert_array_almost_equal( result.column(6), np.corrcoef(values_a, values_b)[0][1] ) @@ -101,35 +99,20 @@ def test_built_in_aggregation(df): ) np.testing.assert_array_almost_equal(result.column(11), np.max(values_a)) np.testing.assert_array_almost_equal(result.column(12), np.mean(values_b)) - np.testing.assert_array_almost_equal( - result.column(13), np.median(values_b) - ) + np.testing.assert_array_almost_equal(result.column(13), np.median(values_b)) np.testing.assert_array_almost_equal(result.column(14), np.min(values_a)) np.testing.assert_array_almost_equal( result.column(15), np.sum(values_b.to_pylist()) ) - np.testing.assert_array_almost_equal( - result.column(16), np.std(values_a, ddof=1) - ) - np.testing.assert_array_almost_equal( - result.column(17), np.std(values_b, ddof=0) - ) - np.testing.assert_array_almost_equal( - result.column(18), np.std(values_c, ddof=1) - ) - np.testing.assert_array_almost_equal( - result.column(19), np.var(values_a, ddof=1) - ) - np.testing.assert_array_almost_equal( - result.column(20), np.var(values_b, ddof=0) - ) - np.testing.assert_array_almost_equal( - result.column(21), np.var(values_c, ddof=1) - ) + np.testing.assert_array_almost_equal(result.column(16), np.std(values_a, ddof=1)) + np.testing.assert_array_almost_equal(result.column(17), np.std(values_b, ddof=0)) + np.testing.assert_array_almost_equal(result.column(18), np.std(values_c, ddof=1)) + np.testing.assert_array_almost_equal(result.column(19), np.var(values_a, ddof=1)) + np.testing.assert_array_almost_equal(result.column(20), np.var(values_b, ddof=0)) + np.testing.assert_array_almost_equal(result.column(21), np.var(values_c, ddof=1)) def test_bit_add_or_xor(df): - df = df.aggregate( [], [ @@ -147,7 +130,6 @@ def test_bit_add_or_xor(df): def test_bool_and_or(df): - df = df.aggregate( [], [ diff --git a/datafusion/tests/test_config.py b/datafusion/tests/test_config.py index 960e72c94..12d9fc3ff 100644 --- a/datafusion/tests/test_config.py +++ b/datafusion/tests/test_config.py @@ -35,10 +35,7 @@ def test_get_then_set(config): def test_get_all(config): config_dict = config.get_all() - assert ( - config_dict["datafusion.catalog.create_default_catalog_and_schema"] - == "true" - ) + assert config_dict["datafusion.catalog.create_default_catalog_and_schema"] == "true" def test_get_invalid_config(config): diff --git a/datafusion/tests/test_context.py b/datafusion/tests/test_context.py index 97bff9bb9..d48bdd929 100644 --- a/datafusion/tests/test_context.py +++ b/datafusion/tests/test_context.py @@ -36,9 +36,7 @@ def test_create_context_no_args(): def test_create_context_with_all_valid_args(): - runtime = ( - RuntimeConfig().with_disk_manager_os().with_fair_spill_pool(10000000) - ) + runtime = RuntimeConfig().with_disk_manager_os().with_fair_spill_pool(10000000) config = ( SessionConfig() .with_create_default_catalog_and_schema(True) @@ -357,9 +355,7 @@ def test_read_json_compressed(ctx, tmp_path): with gzip.open(gzip_path, "wb") as gzipped_file: gzipped_file.writelines(csv_file) - df = ctx.read_json( - gzip_path, file_extension=".gz", file_compression_type="gz" - ) + df = ctx.read_json(gzip_path, file_extension=".gz", file_compression_type="gz") result = df.collect() assert result[0].column(0) == pa.array(["a", "b", "c"]) @@ -381,9 +377,7 @@ def test_read_csv_compressed(ctx, tmp_path): with gzip.open(gzip_path, "wb") as gzipped_file: gzipped_file.writelines(csv_file) - csv_df = ctx.read_csv( - gzip_path, file_extension=".gz", file_compression_type="gz" - ) + csv_df = 
ctx.read_csv(gzip_path, file_extension=".gz", file_compression_type="gz") csv_df.select(column("c1")).show() diff --git a/datafusion/tests/test_dataframe.py b/datafusion/tests/test_dataframe.py index c9b0f076f..9944abeef 100644 --- a/datafusion/tests/test_dataframe.py +++ b/datafusion/tests/test_dataframe.py @@ -151,9 +151,7 @@ def test_with_column(df): def test_with_column_renamed(df): - df = df.with_column("c", column("a") + column("b")).with_column_renamed( - "c", "sum" - ) + df = df.with_column("c", column("a") + column("b")).with_column_renamed("c", "sum") result = df.collect()[0] @@ -218,9 +216,7 @@ def test_distinct(): [pa.array([1, 2, 3]), pa.array([4, 5, 6])], names=["a", "b"], ) - df_b = ctx.create_dataframe([[batch]]).sort( - column("a").sort(ascending=True) - ) + df_b = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) assert df_a.collect() == df_b.collect() @@ -251,9 +247,7 @@ def test_window_functions(df): "cume_dist", ), f.alias( - f.window( - "ntile", [literal(2)], order_by=[f.order_by(column("c"))] - ), + f.window("ntile", [literal(2)], order_by=[f.order_by(column("c"))]), "ntile", ), f.alias( @@ -261,9 +255,7 @@ def test_window_functions(df): "previous", ), f.alias( - f.window( - "lead", [column("b")], order_by=[f.order_by(column("b"))] - ), + f.window("lead", [column("b")], order_by=[f.order_by(column("b"))]), "next", ), f.alias( @@ -275,9 +267,7 @@ def test_window_functions(df): "first_value", ), f.alias( - f.window( - "last_value", [column("b")], order_by=[f.order_by(column("b"))] - ), + f.window("last_value", [column("b")], order_by=[f.order_by(column("b"))]), "last_value", ), f.alias( @@ -418,12 +408,14 @@ def test_optimized_logical_plan(aggregate_df): def test_execution_plan(aggregate_df): plan = aggregate_df.execution_plan() - expected = "AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[SUM(test.c2)]\n" # noqa: E501 + expected = ( + "AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[SUM(test.c2)]\n" # noqa: E501 + ) assert expected == plan.display() # Check the number of partitions is as expected. 
- assert type(plan.partition_count) is int + assert isinstance(plan.partition_count, int) expected = ( "ProjectionExec: expr=[c1@0 as c1, SUM(test.c2)@1 as SUM(test.c2)]\n" @@ -477,9 +469,7 @@ def test_intersect(): [pa.array([3]), pa.array([6])], names=["a", "b"], ) - df_c = ctx.create_dataframe([[batch]]).sort( - column("a").sort(ascending=True) - ) + df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) df_a_i_b = df_a.intersect(df_b).sort(column("a").sort(ascending=True)) @@ -505,9 +495,7 @@ def test_except_all(): [pa.array([1, 2]), pa.array([4, 5])], names=["a", "b"], ) - df_c = ctx.create_dataframe([[batch]]).sort( - column("a").sort(ascending=True) - ) + df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) df_a_e_b = df_a.except_all(df_b).sort(column("a").sort(ascending=True)) @@ -542,9 +530,7 @@ def test_union(ctx): [pa.array([1, 2, 3, 3, 4, 5]), pa.array([4, 5, 6, 6, 7, 8])], names=["a", "b"], ) - df_c = ctx.create_dataframe([[batch]]).sort( - column("a").sort(ascending=True) - ) + df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) df_a_u_b = df_a.union(df_b).sort(column("a").sort(ascending=True)) @@ -568,9 +554,7 @@ def test_union_distinct(ctx): [pa.array([1, 2, 3, 4, 5]), pa.array([4, 5, 6, 7, 8])], names=["a", "b"], ) - df_c = ctx.create_dataframe([[batch]]).sort( - column("a").sort(ascending=True) - ) + df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) df_a_u_b = df_a.union(df_b, True).sort(column("a").sort(ascending=True)) @@ -650,7 +634,7 @@ def test_empty_to_arrow_table(df): def test_to_pylist(df): # Convert datafusion dataframe to Python list pylist = df.to_pylist() - assert type(pylist) == list + assert isinstance(pylist, list) assert pylist == [ {"a": 1, "b": 4, "c": 8}, {"a": 2, "b": 5, "c": 5}, @@ -661,7 +645,7 @@ def test_to_pylist(df): def test_to_pydict(df): # Convert datafusion dataframe to Python dictionary pydict = df.to_pydict() - assert type(pydict) == dict + assert isinstance(pydict, dict) assert pydict == {"a": [1, 2, 3], "b": [4, 5, 6], "c": [8, 5, 8]} @@ -702,9 +686,7 @@ def test_write_parquet(df, tmp_path): "compression, compression_level", [("gzip", 6), ("brotli", 7), ("zstd", 15)], ) -def test_write_compressed_parquet( - df, tmp_path, compression, compression_level -): +def test_write_compressed_parquet(df, tmp_path, compression, compression_level): path = tmp_path df.write_parquet( @@ -744,9 +726,7 @@ def test_write_compressed_parquet_wrong_compression_level( @pytest.mark.parametrize("compression", ["brotli", "zstd", "wrong"]) -def test_write_compressed_parquet_missing_compression_level( - df, tmp_path, compression -): +def test_write_compressed_parquet_missing_compression_level(df, tmp_path, compression): path = tmp_path with pytest.raises(ValueError): diff --git a/datafusion/tests/test_functions.py b/datafusion/tests/test_functions.py index eb37692b2..1a18fd823 100644 --- a/datafusion/tests/test_functions.py +++ b/datafusion/tests/test_functions.py @@ -74,9 +74,7 @@ def test_lit_arith(df): """ Test literals with arithmetic operations """ - df = df.select( - literal(1) + column("b"), f.concat(column("a"), literal("!")) - ) + df = df.select(literal(1) + column("b"), f.concat(column("a"), literal("!"))) result = df.collect() assert len(result) == 1 result = result[0] @@ -148,28 +146,16 @@ def test_math_functions(): np.testing.assert_array_almost_equal(result.column(4), np.arcsin(values)) np.testing.assert_array_almost_equal(result.column(5), 
np.arccos(values)) np.testing.assert_array_almost_equal(result.column(6), np.exp(values)) - np.testing.assert_array_almost_equal( - result.column(7), np.log(values + 1.0) - ) - np.testing.assert_array_almost_equal( - result.column(8), np.log2(values + 1.0) - ) - np.testing.assert_array_almost_equal( - result.column(9), np.log10(values + 1.0) - ) + np.testing.assert_array_almost_equal(result.column(7), np.log(values + 1.0)) + np.testing.assert_array_almost_equal(result.column(8), np.log2(values + 1.0)) + np.testing.assert_array_almost_equal(result.column(9), np.log10(values + 1.0)) np.testing.assert_array_less(result.column(10), np.ones_like(values)) np.testing.assert_array_almost_equal(result.column(11), np.arctan(values)) - np.testing.assert_array_almost_equal( - result.column(12), np.arctan2(values, 1.1) - ) + np.testing.assert_array_almost_equal(result.column(12), np.arctan2(values, 1.1)) np.testing.assert_array_almost_equal(result.column(13), np.ceil(values)) np.testing.assert_array_almost_equal(result.column(14), np.floor(values)) - np.testing.assert_array_almost_equal( - result.column(15), np.power(values, 3) - ) - np.testing.assert_array_almost_equal( - result.column(16), np.power(values, 4) - ) + np.testing.assert_array_almost_equal(result.column(15), np.power(values, 3)) + np.testing.assert_array_almost_equal(result.column(16), np.power(values, 4)) np.testing.assert_array_almost_equal(result.column(17), np.round(values)) np.testing.assert_array_almost_equal(result.column(18), np.sqrt(values)) np.testing.assert_array_almost_equal(result.column(19), np.sign(values)) @@ -190,9 +176,7 @@ def test_math_functions(): np.testing.assert_array_almost_equal(result.column(32), np.sinh(values)) np.testing.assert_array_almost_equal(result.column(33), np.tanh(values)) np.testing.assert_array_almost_equal(result.column(34), math.factorial(6)) - np.testing.assert_array_almost_equal( - result.column(35), np.isnan(na_values) - ) + np.testing.assert_array_almost_equal(result.column(35), np.isnan(na_values)) np.testing.assert_array_almost_equal(result.column(36), na_values == 0) np.testing.assert_array_almost_equal( result.column(37), np.emath.logn(3, values + 1.0) @@ -202,9 +186,7 @@ def test_math_functions(): def test_array_functions(): data = [[1.0, 2.0, 3.0, 3.0], [4.0, 5.0, 3.0], [6.0]] ctx = SessionContext() - batch = pa.RecordBatch.from_arrays( - [np.array(data, dtype=object)], names=["arr"] - ) + batch = pa.RecordBatch.from_arrays([np.array(data, dtype=object)], names=["arr"]) df = ctx.create_dataframe([[batch]]) def py_indexof(arr, v): @@ -310,15 +292,11 @@ def py_flatten(arr): lambda: [1.0 in r for r in data], ], [ - f.array_has_all( - col, f.make_array(*[literal(v) for v in [1.0, 3.0, 5.0]]) - ), + f.array_has_all(col, f.make_array(*[literal(v) for v in [1.0, 3.0, 5.0]])), lambda: [np.all([v in r for v in [1.0, 3.0, 5.0]]) for r in data], ], [ - f.array_has_any( - col, f.make_array(*[literal(v) for v in [1.0, 3.0, 5.0]]) - ), + f.array_has_any(col, f.make_array(*[literal(v) for v in [1.0, 3.0, 5.0]])), lambda: [np.any([v in r for v in [1.0, 3.0, 5.0]]) for r in data], ], [ @@ -339,15 +317,11 @@ def py_flatten(arr): ], [ f.array_positions(col, literal(1.0)), - lambda: [ - [i + 1 for i, _v in enumerate(r) if _v == 1.0] for r in data - ], + lambda: [[i + 1 for i, _v in enumerate(r) if _v == 1.0] for r in data], ], [ f.list_positions(col, literal(1.0)), - lambda: [ - [i + 1 for i, _v in enumerate(r) if _v == 1.0] for r in data - ], + lambda: [[i + 1 for i, _v in enumerate(r) if _v == 1.0] for r 
in data], ], [ f.array_ndims(col), @@ -588,18 +562,9 @@ def test_hash_functions(df): ) assert result.column(2) == pa.array( [ - b( - "185F8DB32271FE25F561A6FC938B2E26" - "4306EC304EDA518007D1764826381969" - ), - b( - "78AE647DC5544D227130A0682A51E30B" - "C7777FBB6D8A8F17007463A3ECD1D524" - ), - b( - "BB7208BC9B5D7C04F1236A82A0093A5E" - "33F40423D5BA8D4266F7092C3BA43B62" - ), + b("185F8DB32271FE25F561A6FC938B2E26" "4306EC304EDA518007D1764826381969"), + b("78AE647DC5544D227130A0682A51E30B" "C7777FBB6D8A8F17007463A3ECD1D524"), + b("BB7208BC9B5D7C04F1236A82A0093A5E" "33F40423D5BA8D4266F7092C3BA43B62"), ] ) assert result.column(3) == pa.array( @@ -645,34 +610,16 @@ def test_hash_functions(df): ) assert result.column(5) == pa.array( [ - b( - "F73A5FBF881F89B814871F46E26AD3FA" - "37CB2921C5E8561618639015B3CCBB71" - ), - b( - "B792A0383FB9E7A189EC150686579532" - "854E44B71AC394831DAED169BA85CCC5" - ), - b( - "27988A0E51812297C77A433F63523334" - "6AEE29A829DCF4F46E0F58F402C6CFCB" - ), + b("F73A5FBF881F89B814871F46E26AD3FA" "37CB2921C5E8561618639015B3CCBB71"), + b("B792A0383FB9E7A189EC150686579532" "854E44B71AC394831DAED169BA85CCC5"), + b("27988A0E51812297C77A433F63523334" "6AEE29A829DCF4F46E0F58F402C6CFCB"), ] ) assert result.column(6) == pa.array( [ - b( - "FBC2B0516EE8744D293B980779178A35" - "08850FDCFE965985782C39601B65794F" - ), - b( - "BF73D18575A736E4037D45F9E316085B" - "86C19BE6363DE6AA789E13DEAACC1C4E" - ), - b( - "C8D11B9F7237E4034ADBCD2005735F9B" - "C4C597C75AD89F4492BEC8F77D15F7EB" - ), + b("FBC2B0516EE8744D293B980779178A35" "08850FDCFE965985782C39601B65794F"), + b("BF73D18575A736E4037D45F9E316085B" "86C19BE6363DE6AA789E13DEAACC1C4E"), + b("C8D11B9F7237E4034ADBCD2005735F9B" "C4C597C75AD89F4492BEC8F77D15F7EB"), ] ) assert result.column(7) == result.column(1) # SHA-224 @@ -738,9 +685,7 @@ def test_temporal_functions(df): def test_case(df): df = df.select( - f.case(column("b")) - .when(literal(4), literal(10)) - .otherwise(literal(8)), + f.case(column("b")).when(literal(4), literal(10)).otherwise(literal(8)), f.case(column("a")) .when(literal("Hello"), literal("Hola")) .when(literal("World"), literal("Mundo")) diff --git a/datafusion/tests/test_input.py b/datafusion/tests/test_input.py index 5b1decf26..fb53d86e5 100644 --- a/datafusion/tests/test_input.py +++ b/datafusion/tests/test_input.py @@ -23,9 +23,7 @@ def test_location_input(): location_input = LocationInputPlugin() cwd = os.getcwd() - input_file = ( - cwd + "/testing/data/parquet/generated_simple_numerics/blogs.parquet" - ) + input_file = cwd + "/testing/data/parquet/generated_simple_numerics/blogs.parquet" table_name = "blog" tbl = location_input.build_table(input_file, table_name) assert "blog" == tbl.name diff --git a/datafusion/tests/test_sql.py b/datafusion/tests/test_sql.py index 0c6b26727..9d4c8f677 100644 --- a/datafusion/tests/test_sql.py +++ b/datafusion/tests/test_sql.py @@ -86,9 +86,7 @@ def test_register_csv(ctx, tmp_path): result = pa.Table.from_batches(result) assert result.schema == alternative_schema - with pytest.raises( - ValueError, match="Delimiter must be a single character" - ): + with pytest.raises(ValueError, match="Delimiter must be a single character"): ctx.register_csv("csv4", path, delimiter="wrong") with pytest.raises( @@ -134,9 +132,7 @@ def test_register_parquet_partitioned(ctx, tmp_path): ) assert ctx.tables() == {"datapp"} - result = ctx.sql( - "SELECT grp, COUNT(*) AS cnt FROM datapp GROUP BY grp" - ).collect() + result = ctx.sql("SELECT grp, COUNT(*) AS cnt FROM datapp GROUP BY grp").collect() 
result = pa.Table.from_batches(result) rd = result.to_pydict() @@ -240,9 +236,7 @@ def test_execute(ctx, tmp_path): assert ctx.tables() == {"t"} # count - result = ctx.sql( - "SELECT COUNT(a) AS cnt FROM t WHERE a IS NOT NULL" - ).collect() + result = ctx.sql("SELECT COUNT(a) AS cnt FROM t WHERE a IS NOT NULL").collect() expected = pa.array([7], pa.int64()) expected = [pa.RecordBatch.from_arrays([expected], ["cnt"])] @@ -280,9 +274,7 @@ def test_execute(ctx, tmp_path): ).collect() expected_a = pa.array([50.0219, 50.0152], pa.float64()) expected_cast = pa.array([50, 50], pa.int32()) - expected = [ - pa.RecordBatch.from_arrays([expected_a, expected_cast], ["a", "a_int"]) - ] + expected = [pa.RecordBatch.from_arrays([expected_a, expected_cast], ["a", "a_int"])] np.testing.assert_equal(expected[0].column(1), expected[0].column(1)) @@ -302,9 +294,7 @@ def test_cast(ctx, tmp_path): "float", ] - select = ", ".join( - [f"CAST(9 AS {t}) AS A{i}" for i, t in enumerate(valid_types)] - ) + select = ", ".join([f"CAST(9 AS {t}) AS A{i}" for i, t in enumerate(valid_types)]) # can execute, which implies that we can cast ctx.sql(f"SELECT {select} FROM t").collect() @@ -333,14 +323,10 @@ def test_udf( ctx, tmp_path, fn, input_types, output_type, input_values, expected_values ): # write to disk - path = helpers.write_parquet( - tmp_path / "a.parquet", pa.array(input_values) - ) + path = helpers.write_parquet(tmp_path / "a.parquet", pa.array(input_values)) ctx.register_parquet("t", path) - func = udf( - fn, input_types, output_type, name="func", volatility="immutable" - ) + func = udf(fn, input_types, output_type, name="func", volatility="immutable") ctx.register_udf(func) batches = ctx.sql("SELECT func(a) AS tt FROM t").collect() diff --git a/datafusion/tests/test_substrait.py b/datafusion/tests/test_substrait.py index 7c7a2c1f2..62f6413a3 100644 --- a/datafusion/tests/test_substrait.py +++ b/datafusion/tests/test_substrait.py @@ -38,22 +38,14 @@ def test_substrait_serialization(ctx): assert ctx.tables() == {"t"} # For now just make sure the method calls blow up - substrait_plan = ss.substrait.serde.serialize_to_plan( - "SELECT * FROM t", ctx - ) + substrait_plan = ss.substrait.serde.serialize_to_plan("SELECT * FROM t", ctx) substrait_bytes = substrait_plan.encode() - assert type(substrait_bytes) is bytes - substrait_bytes = ss.substrait.serde.serialize_bytes( - "SELECT * FROM t", ctx - ) + assert isinstance(substrait_bytes, bytes) + substrait_bytes = ss.substrait.serde.serialize_bytes("SELECT * FROM t", ctx) substrait_plan = ss.substrait.serde.deserialize_bytes(substrait_bytes) - logical_plan = ss.substrait.consumer.from_substrait_plan( - ctx, substrait_plan - ) + logical_plan = ss.substrait.consumer.from_substrait_plan(ctx, substrait_plan) # demonstrate how to create a DataFrame from a deserialized logical plan df = ctx.create_dataframe_from_logical_plan(logical_plan) - substrait_plan = ss.substrait.producer.to_substrait_plan( - df.logical_plan(), ctx - ) + substrait_plan = ss.substrait.producer.to_substrait_plan(df.logical_plan(), ctx) diff --git a/dev/release/check-rat-report.py b/dev/release/check-rat-report.py index 30a01116b..d3dd7c5dd 100644 --- a/dev/release/check-rat-report.py +++ b/dev/release/check-rat-report.py @@ -23,9 +23,7 @@ import xml.etree.ElementTree as ET if len(sys.argv) != 3: - sys.stderr.write( - "Usage: %s exclude_globs.lst rat_report.xml\n" % sys.argv[0] - ) + sys.stderr.write("Usage: %s exclude_globs.lst rat_report.xml\n" % sys.argv[0]) sys.exit(1) exclude_globs_filename = 
sys.argv[1] diff --git a/dev/release/generate-changelog.py b/dev/release/generate-changelog.py index e97f00304..01d640669 100755 --- a/dev/release/generate-changelog.py +++ b/dev/release/generate-changelog.py @@ -27,9 +27,7 @@ def print_pulls(repo_name, title, pulls): print("**{}:**".format(title)) print() for pull, commit in pulls: - url = "https://github.com/{}/pull/{}".format( - repo_name, pull.number - ) + url = "https://github.com/{}/pull/{}".format(repo_name, pull.number) print( "- {} [#{}]({}) ({})".format( pull.title, pull.number, url, commit.author.login @@ -40,9 +38,7 @@ def print_pulls(repo_name, title, pulls): def generate_changelog(repo, repo_name, tag1, tag2): # get a list of commits between two tags - print( - f"Fetching list of commits between {tag1} and {tag2}", file=sys.stderr - ) + print(f"Fetching list of commits between {tag1} and {tag2}", file=sys.stderr) comparison = repo.compare(tag1, tag2) # get the pull requests for these commits diff --git a/examples/sql-on-polars.py b/examples/sql-on-polars.py index dd7a9e021..ffcb12b70 100644 --- a/examples/sql-on-polars.py +++ b/examples/sql-on-polars.py @@ -20,7 +20,5 @@ ctx = SessionContext() ctx.register_table("taxi", "yellow_tripdata_2021-01.parquet") -df = ctx.sql( - "select passenger_count, count(*) from taxi group by passenger_count" -) +df = ctx.sql("select passenger_count, count(*) from taxi group by passenger_count") print(df) diff --git a/examples/sql-using-python-udaf.py b/examples/sql-using-python-udaf.py index 3326c4a1c..7ccf5d3cb 100644 --- a/examples/sql-using-python-udaf.py +++ b/examples/sql-using-python-udaf.py @@ -30,15 +30,11 @@ def __init__(self): def update(self, values: pa.Array) -> None: # not nice since pyarrow scalars can't be summed yet. This breaks on `None` - self._sum = pa.scalar( - self._sum.as_py() + pa.compute.sum(values).as_py() - ) + self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(values).as_py()) def merge(self, states: pa.Array) -> None: # not nice since pyarrow scalars can't be summed yet. 
This breaks on `None` - self._sum = pa.scalar( - self._sum.as_py() + pa.compute.sum(states).as_py() - ) + self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(states).as_py()) def state(self) -> pa.Array: return pa.array([self._sum.as_py()]) diff --git a/examples/substrait.py b/examples/substrait.py index c579751d2..23cd74649 100644 --- a/examples/substrait.py +++ b/examples/substrait.py @@ -23,9 +23,7 @@ ctx = SessionContext() # Register table with context -ctx.register_csv( - "aggregate_test_data", "./testing/data/csv/aggregate_test_100.csv" -) +ctx.register_csv("aggregate_test_data", "./testing/data/csv/aggregate_test_100.csv") substrait_plan = ss.substrait.serde.serialize_to_plan( "SELECT * FROM aggregate_test_data", ctx @@ -49,9 +47,7 @@ substrait_plan = ss.substrait.serde.deserialize_bytes(substrait_bytes) # type(df_logical_plan) -> -df_logical_plan = ss.substrait.consumer.from_substrait_plan( - ctx, substrait_plan -) +df_logical_plan = ss.substrait.consumer.from_substrait_plan(ctx, substrait_plan) # Back to Substrait Plan just for demonstration purposes # type(substrait_plan) -> diff --git a/requirements-310.txt b/requirements-310.txt index 517c31905..915f74d54 100644 --- a/requirements-310.txt +++ b/requirements-310.txt @@ -2,32 +2,16 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --generate-hashes --resolver=backtracking requirements.in +# pip-compile --generate-hashes --output-file=requirements-310.txt # attrs==21.2.0 \ --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb # via pytest -black==21.9b0 \ - --hash=sha256:380f1b5da05e5a1429225676655dddb96f5ae8c75bdf91e53d798871b902a115 \ - --hash=sha256:7de4cfc7eb6b710de325712d40125689101d21d25283eed7e9998722cf10eb91 - # via -r requirements.in -click==8.0.3 \ - --hash=sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3 \ - --hash=sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b - # via black -flake8==4.0.1 \ - --hash=sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d \ - --hash=sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d - # via -r requirements.in iniconfig==1.1.1 \ --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 # via pytest -isort==5.9.3 \ - --hash=sha256:9c2ea1e62d871267b78307fe511c0838ba0da28698c5732d54e2790bf3ba9899 \ - --hash=sha256:e17d6e2b81095c9db0a03a8025a957f334d6ea30b26f9ec70805411e5c7c81f2 - # via -r requirements.in maturin==0.15.1 \ --hash=sha256:0e89a87549d671056f9358832c8aa1bb522abcf13eeca83327a58b091d4f5a98 \ --hash=sha256:229d7eb6e14455a3c69a10a4546f082c7bd5490b8ec7f50d5d10edcea600dc64 \ @@ -43,10 +27,6 @@ maturin==0.15.1 \ --hash=sha256:dabb8ff46461c6fb1d68e8972a172cf1dede3c9825a41e4a6caecc95c26ca3b4 \ --hash=sha256:f604b65fd9f0b94856e88cf8b345e21a27276297e6df4ad9305937887feda13b # via -r requirements.in -mccabe==0.6.1 \ - --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ - --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f - # via flake8 mypy==0.910 \ --hash=sha256:088cd9c7904b4ad80bec811053272986611b84221835e079be5bcad029e79dd9 \ --hash=sha256:0aadfb2d3935988ec3815952e44058a3100499f5be5b28c34ac9d79f002a4a9a \ @@ -75,9 +55,7 @@ mypy==0.910 \ mypy-extensions==0.4.3 \ 
--hash=sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d \ --hash=sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8 - # via - # black - # mypy + # via mypy numpy==1.21.3 \ --hash=sha256:043e83bfc274649c82a6f09836943e4a4aebe5e33656271c7dbf9621dd58b8ec \ --hash=sha256:160ccc1bed3a8371bf0d760971f09bfe80a3e18646620e9ded0ad159d9749baa \ @@ -119,14 +97,6 @@ packaging==21.0 \ --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 # via pytest -pathspec==0.9.0 \ - --hash=sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a \ - --hash=sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1 - # via black -platformdirs==2.4.0 \ - --hash=sha256:367a5e80b3d04d2428ffa76d33f124cf11e8fff2acdaa9b43d545f5c7d661ef2 \ - --hash=sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d - # via black pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 @@ -135,52 +105,44 @@ py==1.10.0 \ --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \ --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a # via pytest -pyarrow==6.0.0 \ - --hash=sha256:004185e0babc6f3c3fba6ba4f106e406a0113d0f82bb9ad9a8571a1978c45d04 \ - --hash=sha256:0204e80777ab8f4e9abd3a765a8ec07ed1e3c4630bacda50d2ce212ef0f3826f \ - --hash=sha256:072c1a0fca4509eefd7d018b78542fb7e5c63aaf5698f1c0a6e45628ae17ba44 \ - --hash=sha256:15dc0d673d3f865ca63c877bd7a2eced70b0a08969fb733a28247134b8a1f18b \ - --hash=sha256:1c38263ea438a1666b13372e7565450cfeec32dbcd1c2595749476a58465eaec \ - --hash=sha256:281ce5fa03621d786a9beb514abb09846db7f0221b50eabf543caa24037eaacd \ - --hash=sha256:2d2c681659396c745e4f1988d5dd41dcc3ad557bb8d4a8c2e44030edafc08a91 \ - --hash=sha256:376c4b5f248ae63df21fe15c194e9013753164be2d38f4b3fb8bde63ac5a1958 \ - --hash=sha256:465f87fa0be0b2928b2beeba22b5813a0203fb05d90fd8563eea48e08ecc030e \ - --hash=sha256:477c746ef42c039348a288584800e299456c80c5691401bb9b19aa9c02a427b7 \ - --hash=sha256:5144bd9db2920c7cb566c96462d62443cc239104f94771d110f74393f2fb42a2 \ - --hash=sha256:5408fa8d623e66a0445f3fb0e4027fd219bf99bfb57422d543d7b7876e2c5b55 \ - --hash=sha256:5be62679201c441356d3f2a739895dcc8d4d299f2a6eabcd2163bfb6a898abba \ - --hash=sha256:5c666bc6a1cebf01206e2dc1ab05f25f39f35d3a499e0ef5cd635225e07306ca \ - --hash=sha256:6163d82cca7541774b00503c295fe86a1722820eddb958b57f091bb6f5b0a6db \ - --hash=sha256:6a1d9a2f4ee812ed0bd4182cabef99ea914ac297274f0de086f2488093d284ef \ - --hash=sha256:7a683f71b848eb6310b4ec48c0def55dac839e9994c1ac874c9b2d3d5625def1 \ - --hash=sha256:82fe80309e01acf29e3943a1f6d3c98ec109fe1d356bc1ac37d639bcaadcf684 \ - --hash=sha256:8c23f8cdecd3d9e49f9b0f9a651ae5549d1d32fd4901fb1bdc2d327edfba844f \ - --hash=sha256:8d41dfb09ba9236cca6245f33088eb42f3c54023da281139241e0f9f3b4b754e \ - --hash=sha256:a19e58dfb04e451cd8b7bdec3ac8848373b95dfc53492c9a69789aa9074a3c1b \ - --hash=sha256:a50d2f77b86af38ceabf45617208b9105d20e7a5eebc584e7c8c0acededd82ce \ - --hash=sha256:a5bed4f948c032c40597302e9bdfa65f62295240306976ecbe43a54924c6f94f \ - --hash=sha256:ac941a147d14993987cc8b605b721735a34b3e54d167302501fb4db1ad7382c7 \ - --hash=sha256:b86d175262db1eb46afdceb36d459409eb6f8e532d3dec162f8bf572c7f57623 \ - --hash=sha256:bf3400780c4d3c9cb43b1e8a1aaf2e1b7199a0572d0a645529d2784e4d0d8497 \ - 
--hash=sha256:c7a6e7e0bf8779e9c3428ced85507541f3da9a0675e2f4781d4eb2c7042cbf81 \ - --hash=sha256:cc1d4a70efd583befe92d4ea6f74ed2e0aa31ccdde767cd5cae8e77c65a1c2d4 \ - --hash=sha256:d046dc78a9337baa6415be915c5a16222505233e238a1017f368243c89817eea \ - --hash=sha256:da7860688c33ca88ac05f1a487d32d96d9caa091412496c35f3d1d832145675a \ - --hash=sha256:ddf2e6e3b321adaaf716f2d5af8e92d205a9671e0cb7c0779710a567fd1dd580 \ - --hash=sha256:e81508239a71943759cee272ce625ae208092dd36ef2c6713fccee30bbcf52bb \ - --hash=sha256:ea64a48a85c631eb2a0ea13ccdec5143c85b5897836b16331ee4289d27a57247 \ - --hash=sha256:ed0be080cf595ea15ff1c9ff4097bbf1fcc4b50847d98c0a3c0412fbc6ede7e9 \ - --hash=sha256:fb701ec4a94b92102606d4e88f0b8eba34f09a5ad8e014eaa4af76f42b7f62ae \ - --hash=sha256:fbda7595f24a639bcef3419ecfac17216efacb09f7b0f1b4c4c97f900d65ca0e +pyarrow==15.0.0 \ + --hash=sha256:001fca027738c5f6be0b7a3159cc7ba16a5c52486db18160909a0831b063c4e4 \ + --hash=sha256:003d680b5e422d0204e7287bb3fa775b332b3fce2996aa69e9adea23f5c8f970 \ + --hash=sha256:036a7209c235588c2f07477fe75c07e6caced9b7b61bb897c8d4e52c4b5f9555 \ + --hash=sha256:07eb7f07dc9ecbb8dace0f58f009d3a29ee58682fcdc91337dfeb51ea618a75b \ + --hash=sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d \ + --hash=sha256:0ca9cb0039923bec49b4fe23803807e4ef39576a2bec59c32b11296464623dc2 \ + --hash=sha256:17d53a9d1b2b5bd7d5e4cd84d018e2a45bc9baaa68f7e6e3ebed45649900ba99 \ + --hash=sha256:19a8918045993349b207de72d4576af0191beef03ea655d8bdb13762f0cd6eac \ + --hash=sha256:1f500956a49aadd907eaa21d4fff75f73954605eaa41f61cb94fb008cf2e00c6 \ + --hash=sha256:2bd8a0e5296797faf9a3294e9fa2dc67aa7f10ae2207920dbebb785c77e9dbe5 \ + --hash=sha256:47af7036f64fce990bb8a5948c04722e4e3ea3e13b1007ef52dfe0aa8f23cf7f \ + --hash=sha256:5b8d43e31ca16aa6e12402fcb1e14352d0d809de70edd185c7650fe80e0769e3 \ + --hash=sha256:5db1769e5d0a77eb92344c7382d6543bea1164cca3704f84aa44e26c67e320fb \ + --hash=sha256:60a6bdb314affa9c2e0d5dddf3d9cbb9ef4a8dddaa68669975287d47ece67642 \ + --hash=sha256:66958fd1771a4d4b754cd385835e66a3ef6b12611e001d4e5edfcef5f30391e2 \ + --hash=sha256:6eda9e117f0402dfcd3cd6ec9bfee89ac5071c48fc83a84f3075b60efa96747f \ + --hash=sha256:6f87d9c4f09e049c2cade559643424da84c43a35068f2a1c4653dc5b1408a929 \ + --hash=sha256:85239b9f93278e130d86c0e6bb455dcb66fc3fd891398b9d45ace8799a871a1e \ + --hash=sha256:876858f549d540898f927eba4ef77cd549ad8d24baa3207cf1b72e5788b50e83 \ + --hash=sha256:8780b1a29d3c8b21ba6b191305a2a607de2e30dab399776ff0aa09131e266340 \ + --hash=sha256:93768ccfff85cf044c418bfeeafce9a8bb0cee091bd8fd19011aff91e58de540 \ + --hash=sha256:972a0141be402bb18e3201448c8ae62958c9c7923dfaa3b3d4530c835ac81aed \ + --hash=sha256:9950a9c9df24090d3d558b43b97753b8f5867fb8e521f29876aa021c52fda351 \ + --hash=sha256:9a3a6180c0e8f2727e6f1b1c87c72d3254cac909e609f35f22532e4115461177 \ + --hash=sha256:9ed5a78ed29d171d0acc26a305a4b7f83c122d54ff5270810ac23c75813585e4 \ + --hash=sha256:c8c287d1d479de8269398b34282e206844abb3208224dbdd7166d580804674b7 \ + --hash=sha256:d0ec076b32bacb6666e8813a22e6e5a7ef1314c8069d4ff345efa6246bc38593 \ + --hash=sha256:d1c48648f64aec09accf44140dccb92f4f94394b8d79976c426a5b79b11d4fa7 \ + --hash=sha256:d31c1d45060180131caf10f0f698e3a782db333a422038bf7fe01dace18b3a31 \ + --hash=sha256:e2617e3bf9df2a00020dd1c1c6dce5cc343d979efe10bc401c0632b0eef6ef5b \ + --hash=sha256:e8ebed6053dbe76883a822d4e8da36860f479d55a762bd9e70d8494aed87113e \ + --hash=sha256:f01fc5cf49081426429127aa2d427d9d98e1cb94a32cb961d583a70b7c4504e6 \ + 
--hash=sha256:f6ee87fd6892700960d90abb7b17a72a5abb3b64ee0fe8db6c782bcc2d0dc0b4 \ + --hash=sha256:f75fce89dad10c95f4bf590b765e3ae98bcc5ba9f6ce75adb828a334e26a3d40 \ + --hash=sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5 \ + --hash=sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19 # via -r requirements.in -pycodestyle==2.8.0 \ - --hash=sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20 \ - --hash=sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f - # via flake8 -pyflakes==2.4.0 \ - --hash=sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c \ - --hash=sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e - # via flake8 pyparsing==3.0.3 \ --hash=sha256:9e3511118010f112a4b4b435ae50e1eaa610cda191acb9e421d60cf5fde83455 \ --hash=sha256:f8d3fe9fc404576c5164f0f0c4e382c96b85265e023c409c43d48f65da9d60d0 @@ -189,44 +151,25 @@ pytest==6.2.5 \ --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 # via -r requirements.in -regex==2021.10.23 \ - --hash=sha256:0c186691a7995ef1db61205e00545bf161fb7b59cdb8c1201c89b333141c438a \ - --hash=sha256:0dcc0e71118be8c69252c207630faf13ca5e1b8583d57012aae191e7d6d28b84 \ - --hash=sha256:0f7552429dd39f70057ac5d0e897e5bfe211629652399a21671e53f2a9693a4e \ - --hash=sha256:129472cd06062fb13e7b4670a102951a3e655e9b91634432cfbdb7810af9d710 \ - --hash=sha256:13ec99df95003f56edcd307db44f06fbeb708c4ccdcf940478067dd62353181e \ - --hash=sha256:1f2b59c28afc53973d22e7bc18428721ee8ca6079becf1b36571c42627321c65 \ - --hash=sha256:2b20f544cbbeffe171911f6ce90388ad36fe3fad26b7c7a35d4762817e9ea69c \ - --hash=sha256:2fb698037c35109d3c2e30f2beb499e5ebae6e4bb8ff2e60c50b9a805a716f79 \ - --hash=sha256:34d870f9f27f2161709054d73646fc9aca49480617a65533fc2b4611c518e455 \ - --hash=sha256:391703a2abf8013d95bae39145d26b4e21531ab82e22f26cd3a181ee2644c234 \ - --hash=sha256:450dc27483548214314640c89a0f275dbc557968ed088da40bde7ef8fb52829e \ - --hash=sha256:45b65d6a275a478ac2cbd7fdbf7cc93c1982d613de4574b56fd6972ceadb8395 \ - --hash=sha256:5095a411c8479e715784a0c9236568ae72509450ee2226b649083730f3fadfc6 \ - --hash=sha256:530fc2bbb3dc1ebb17f70f7b234f90a1dd43b1b489ea38cea7be95fb21cdb5c7 \ - --hash=sha256:56f0c81c44638dfd0e2367df1a331b4ddf2e771366c4b9c5d9a473de75e3e1c7 \ - --hash=sha256:5e9c9e0ce92f27cef79e28e877c6b6988c48b16942258f3bc55d39b5f911df4f \ - --hash=sha256:6d7722136c6ed75caf84e1788df36397efdc5dbadab95e59c2bba82d4d808a4c \ - --hash=sha256:74d071dbe4b53c602edd87a7476ab23015a991374ddb228d941929ad7c8c922e \ - --hash=sha256:7b568809dca44cb75c8ebb260844ea98252c8c88396f9d203f5094e50a70355f \ - --hash=sha256:80bb5d2e92b2258188e7dcae5b188c7bf868eafdf800ea6edd0fbfc029984a88 \ - --hash=sha256:8d1cdcda6bd16268316d5db1038965acf948f2a6f43acc2e0b1641ceab443623 \ - --hash=sha256:9f665677e46c5a4d288ece12fdedf4f4204a422bb28ff05f0e6b08b7447796d1 \ - --hash=sha256:a30513828180264294953cecd942202dfda64e85195ae36c265daf4052af0464 \ - --hash=sha256:a7a986c45d1099a5de766a15de7bee3840b1e0e1a344430926af08e5297cf666 \ - --hash=sha256:a940ca7e7189d23da2bfbb38973832813eab6bd83f3bf89a977668c2f813deae \ - --hash=sha256:ab7c5684ff3538b67df3f93d66bd3369b749087871ae3786e70ef39e601345b0 \ - --hash=sha256:be04739a27be55631069b348dda0c81d8ea9822b5da10b8019b789e42d1fe452 \ - --hash=sha256:c0938ddd60cc04e8f1faf7a14a166ac939aac703745bfcd8e8f20322a7373019 \ - 
--hash=sha256:cb46b542133999580ffb691baf67410306833ee1e4f58ed06b6a7aaf4e046952 \ - --hash=sha256:d134757a37d8640f3c0abb41f5e68b7cf66c644f54ef1cb0573b7ea1c63e1509 \ - --hash=sha256:de557502c3bec8e634246588a94e82f1ee1b9dfcfdc453267c4fb652ff531570 \ - --hash=sha256:ded0c4a3eee56b57fcb2315e40812b173cafe79d2f992d50015f4387445737fa \ - --hash=sha256:e1dae12321b31059a1a72aaa0e6ba30156fe7e633355e445451e4021b8e122b6 \ - --hash=sha256:eb672217f7bd640411cfc69756ce721d00ae600814708d35c930930f18e8029f \ - --hash=sha256:ee684f139c91e69fe09b8e83d18b4d63bf87d9440c1eb2eeb52ee851883b1b29 \ - --hash=sha256:f3f9a91d3cc5e5b0ddf1043c0ae5fa4852f18a1c0050318baf5fc7930ecc1f9c - # via black +ruff==0.3.0 \ + --hash=sha256:0886184ba2618d815067cf43e005388967b67ab9c80df52b32ec1152ab49f53a \ + --hash=sha256:128265876c1d703e5f5e5a4543bd8be47c73a9ba223fd3989d4aa87dd06f312f \ + --hash=sha256:19eacceb4c9406f6c41af806418a26fdb23120dfe53583df76d1401c92b7c14b \ + --hash=sha256:23dbb808e2f1d68eeadd5f655485e235c102ac6f12ad31505804edced2a5ae77 \ + --hash=sha256:2f7dbba46e2827dfcb0f0cc55fba8e96ba7c8700e0a866eb8cef7d1d66c25dcb \ + --hash=sha256:3ef655c51f41d5fa879f98e40c90072b567c666a7114fa2d9fe004dffba00932 \ + --hash=sha256:5da894a29ec018a8293d3d17c797e73b374773943e8369cfc50495573d396933 \ + --hash=sha256:755c22536d7f1889be25f2baf6fedd019d0c51d079e8417d4441159f3bcd30c2 \ + --hash=sha256:7deb528029bacf845bdbb3dbb2927d8ef9b4356a5e731b10eef171e3f0a85944 \ + --hash=sha256:9343690f95710f8cf251bee1013bf43030072b9f8d012fbed6ad702ef70d360a \ + --hash=sha256:a1f3ed501a42f60f4dedb7805fa8d4534e78b4e196f536bac926f805f0743d49 \ + --hash=sha256:b08b356d06a792e49a12074b62222f9d4ea2a11dca9da9f68163b28c71bf1dd4 \ + --hash=sha256:cc30a9053ff2f1ffb505a585797c23434d5f6c838bacfe206c0e6cf38c921a1e \ + --hash=sha256:d0d3d7ef3d4f06433d592e5f7d813314a34601e6c5be8481cccb7fa760aa243e \ + --hash=sha256:dd73fe7f4c28d317855da6a7bc4aa29a1500320818dd8f27df95f70a01b8171f \ + --hash=sha256:e1e0d4381ca88fb2b73ea0766008e703f33f460295de658f5467f6f229658c19 \ + --hash=sha256:e3a4a6d46aef0a84b74fcd201a4401ea9a6cd85614f6a9435f2d33dd8cefbf83 + # via -r requirements.in toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f @@ -237,13 +180,9 @@ toml==0.10.2 \ tomli==1.2.2 \ --hash=sha256:c6ce0015eb38820eaf32b5db832dbc26deb3dd427bd5f6556cf0acac2c214fee \ --hash=sha256:f04066f68f5554911363063a30b108d2b5a5b1a010aa8b6132af78489fe3aade - # via - # black - # maturin + # via maturin typing-extensions==3.10.0.2 \ --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e \ --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \ --hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 - # via - # black - # mypy + # via mypy diff --git a/requirements-311.txt b/requirements-311.txt index c5ae2ef79..876311e6c 100644 --- a/requirements-311.txt +++ b/requirements-311.txt @@ -4,46 +4,10 @@ # # pip-compile --generate-hashes --output-file=requirements-311.txt # -black==24.2.0 \ - --hash=sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8 \ - --hash=sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8 \ - --hash=sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd \ - --hash=sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9 \ - --hash=sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31 \ - 
--hash=sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92 \ - --hash=sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f \ - --hash=sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29 \ - --hash=sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4 \ - --hash=sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693 \ - --hash=sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218 \ - --hash=sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a \ - --hash=sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23 \ - --hash=sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0 \ - --hash=sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982 \ - --hash=sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894 \ - --hash=sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540 \ - --hash=sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430 \ - --hash=sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b \ - --hash=sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2 \ - --hash=sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6 \ - --hash=sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d - # via -r requirements.in -click==8.1.7 \ - --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ - --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de - # via black -flake8==7.0.0 \ - --hash=sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132 \ - --hash=sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3 - # via -r requirements.in iniconfig==2.0.0 \ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 # via pytest -isort==5.13.2 \ - --hash=sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109 \ - --hash=sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6 - # via -r requirements.in maturin==1.4.0 \ --hash=sha256:01473dc30aed8f2cee3572b3e99e3ea75bf09c84b028bf6077f7643a189699c8 \ --hash=sha256:076970a73da7fa3648204a584cd347b899c1ea67f8124b212bccd06728e63ed9 \ @@ -59,10 +23,6 @@ maturin==1.4.0 \ --hash=sha256:ff95a4494d9e57b6e74d4d7f8a9a2ee8ed29bd7f0e61855656ad959a432c0efc \ --hash=sha256:ffe4e967080ceb83c156e73a37d3974b30cad01c376a86dc39a76a0c6bccf9b0 # via -r requirements.in -mccabe==0.7.0 \ - --hash=sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325 \ - --hash=sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e - # via flake8 mypy==1.8.0 \ --hash=sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6 \ --hash=sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d \ @@ -95,9 +55,7 @@ mypy==1.8.0 \ mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ --hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782 - # via - # black - # mypy + # via mypy numpy==1.26.4 \ --hash=sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b \ --hash=sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818 \ @@ -141,17 +99,7 @@ numpy==1.26.4 \ packaging==23.2 \ 
--hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 - # via - # black - # pytest -pathspec==0.12.1 \ - --hash=sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08 \ - --hash=sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712 - # via black -platformdirs==4.2.0 \ - --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \ - --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768 - # via black + # via pytest pluggy==1.4.0 \ --hash=sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 \ --hash=sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be @@ -194,18 +142,29 @@ pyarrow==15.0.0 \ --hash=sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5 \ --hash=sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19 # via -r requirements.in -pycodestyle==2.11.1 \ - --hash=sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f \ - --hash=sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67 - # via flake8 -pyflakes==3.2.0 \ - --hash=sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f \ - --hash=sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a - # via flake8 pytest==8.0.2 \ --hash=sha256:d4051d623a2e0b7e51960ba963193b09ce6daeb9759a451844a21e4ddedfc1bd \ --hash=sha256:edfaaef32ce5172d5466b5127b42e0d6d35ebbe4453f0e3505d96afd93f6b096 # via -r requirements.in +ruff==0.3.0 \ + --hash=sha256:0886184ba2618d815067cf43e005388967b67ab9c80df52b32ec1152ab49f53a \ + --hash=sha256:128265876c1d703e5f5e5a4543bd8be47c73a9ba223fd3989d4aa87dd06f312f \ + --hash=sha256:19eacceb4c9406f6c41af806418a26fdb23120dfe53583df76d1401c92b7c14b \ + --hash=sha256:23dbb808e2f1d68eeadd5f655485e235c102ac6f12ad31505804edced2a5ae77 \ + --hash=sha256:2f7dbba46e2827dfcb0f0cc55fba8e96ba7c8700e0a866eb8cef7d1d66c25dcb \ + --hash=sha256:3ef655c51f41d5fa879f98e40c90072b567c666a7114fa2d9fe004dffba00932 \ + --hash=sha256:5da894a29ec018a8293d3d17c797e73b374773943e8369cfc50495573d396933 \ + --hash=sha256:755c22536d7f1889be25f2baf6fedd019d0c51d079e8417d4441159f3bcd30c2 \ + --hash=sha256:7deb528029bacf845bdbb3dbb2927d8ef9b4356a5e731b10eef171e3f0a85944 \ + --hash=sha256:9343690f95710f8cf251bee1013bf43030072b9f8d012fbed6ad702ef70d360a \ + --hash=sha256:a1f3ed501a42f60f4dedb7805fa8d4534e78b4e196f536bac926f805f0743d49 \ + --hash=sha256:b08b356d06a792e49a12074b62222f9d4ea2a11dca9da9f68163b28c71bf1dd4 \ + --hash=sha256:cc30a9053ff2f1ffb505a585797c23434d5f6c838bacfe206c0e6cf38c921a1e \ + --hash=sha256:d0d3d7ef3d4f06433d592e5f7d813314a34601e6c5be8481cccb7fa760aa243e \ + --hash=sha256:dd73fe7f4c28d317855da6a7bc4aa29a1500320818dd8f27df95f70a01b8171f \ + --hash=sha256:e1e0d4381ca88fb2b73ea0766008e703f33f460295de658f5467f6f229658c19 \ + --hash=sha256:e3a4a6d46aef0a84b74fcd201a4401ea9a6cd85614f6a9435f2d33dd8cefbf83 + # via -r requirements.in toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f diff --git a/requirements.in b/requirements.in index 09872b03b..2365d23e0 100644 --- a/requirements.in +++ b/requirements.in @@ -15,13 +15,11 @@ # specific language governing permissions and limitations # under the License. 
-black -flake8 -isort maturin>=0.15 mypy numpy pyarrow>=11.0.0 pytest +ruff toml importlib_metadata; python_version < "3.8"
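For anyone trying the new toolchain locally before this merges, the commands below are a rough sketch of how the removed `black`/`flake8` steps map onto ruff. This assumes the ruff 0.3.0 pin introduced above: `ruff check` takes over the lint pass and `ruff format` the formatting pass, though one-to-one rule parity with the old flake8 `--ignore=E501,W503` settings is not guaranteed.

```shell
# Lint with GitHub-style output so CI can attach inline annotations;
# this mirrors the invocation added to build.yml above.
ruff check --output-format=github datafusion

# Print formatting differences without rewriting files, roughly
# equivalent to the removed `black --diff --check` step. (The new
# ci/scripts/python_lint.sh runs `ruff format` to apply fixes instead.)
ruff format --diff datafusion
```

Note that `ruff format` defaults to an 88-character line length rather than the 79 characters enforced by the old `black --line-length 79` invocation, which is why many of the reformatted lines in this diff grow longer; keeping the old width would require an explicit `line-length = 79` setting under `[tool.ruff]` in `pyproject.toml` (hypothetical, not part of this change).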