From 11dcafa3d863ca1edf74bdc63ba941f3126d31ab Mon Sep 17 00:00:00 2001 From: saisuraj27 Date: Thu, 21 Dec 2023 22:08:04 +0530 Subject: [PATCH] Replaced Flake8 with ruff and merged setup.cfg into pyproject.toml --- .pre-commit-config.yaml | 17 +++------ ivy/__init__.py | 2 +- .../array/experimental/linear_algebra.py | 8 ++-- ivy/data_classes/container/base.py | 8 ++-- .../factorized_tensor/cp_tensor.py | 6 +-- .../factorized_tensor/parafac2_tensor.py | 2 +- .../factorized_tensor/tt_tensor.py | 10 ++--- ivy/functional/backends/numpy/manipulation.py | 2 +- .../backends/tensorflow/manipulation.py | 2 +- .../frontends/jax/numpy/manipulations.py | 2 +- .../transpose_like_operations.py | 2 +- ivy/functional/ivy/elementwise.py | 4 +- ivy/functional/ivy/experimental/layers.py | 4 +- .../ivy/experimental/linear_algebra.py | 4 +- ivy/functional/ivy/losses.py | 4 +- ivy/utils/backend/handler.py | 4 +- .../test_frontends/test_paddle/test_linalg.py | 4 +- .../test_frontends/test_torch/test_linalg.py | 2 +- .../test_functional/test_core/test_device.py | 2 +- pyproject.toml | 38 +++++++++++++++++-- setup.cfg | 25 ------------ 21 files changed, 76 insertions(+), 76 deletions(-) delete mode 100644 setup.cfg diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2e231afb1f7a7..2e7f83aff2726 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,6 +6,12 @@ repos: - id: trailing-whitespace - id: check-toml - id: end-of-file-fixer + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.1.8 + hooks: + # Run the linter. + - id: ruff + args: [ --fix ] - repo: https://github.com/psf/black-pre-commit-mirror rev: 23.12.0 hooks: @@ -23,21 +29,10 @@ repos: rev: v2.2.1 hooks: - id: autoflake - - repo: https://github.com/pycqa/flake8 - rev: 6.1.0 - hooks: - - id: flake8 - exclude: ^.*__init__.py$ - repo: https://github.com/PyCQA/docformatter rev: v1.7.5 hooks: - id: docformatter - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.8 - hooks: - # Run the linter. - - id: ruff - args: [ --fix ] - repo: https://github.com/unifyai/lint-hook rev: a72ffb17562d919311653d7f593cb537d1245c19 hooks: diff --git a/ivy/__init__.py b/ivy/__init__.py index 54a493d5f7e6e..b7b3ac0a4261a 100644 --- a/ivy/__init__.py +++ b/ivy/__init__.py @@ -800,7 +800,7 @@ class Node(str): pass # Added for the finally statement try: from .compiler.replace_with import replace_with, transform_function -except: +except: # noqa: E722 pass finally: # Skip framework imports done by Ivy compiler for now diff --git a/ivy/data_classes/array/experimental/linear_algebra.py b/ivy/data_classes/array/experimental/linear_algebra.py index 6156d43f33ee0..09ff0484e31ea 100644 --- a/ivy/data_classes/array/experimental/linear_algebra.py +++ b/ivy/data_classes/array/experimental/linear_algebra.py @@ -323,8 +323,8 @@ def multi_mode_dot( If True, the matrices or vectors in in the list are transposed. For complex tensors, the conjugate transpose is used. out - optional output array, for writing the result to. It must have a shape that the - result can broadcast to. + optional output array, for writing the result to. + It must have a shape that the result can broadcast to. 
Returns ------- @@ -334,8 +334,8 @@ def multi_mode_dot( Notes ----- If no modes are specified, just assumes there is one matrix or vector per mode and returns: - :math:`\\text{x }\\times_0 \\text{ matrix or vec list[0] }\\times_1 \\cdots \\times_n \\text{ matrix or vec list[n] }` # noqa - """ + :math:`\\text{x }\\times_0 \\text{ matrix or vec list[0] }\\times_1 \\cdots \\times_n \\text{ matrix or vec list[n] }` + """ # noqa: E501 return ivy.multi_mode_dot( self._data, mat_or_vec_list, modes, skip, transpose, out=out ) diff --git a/ivy/data_classes/container/base.py b/ivy/data_classes/container/base.py index a265766f69aa2..b782245f53dae 100644 --- a/ivy/data_classes/container/base.py +++ b/ivy/data_classes/container/base.py @@ -1155,7 +1155,7 @@ def cont_from_disk_as_hdf5( ), ) container_dict = {} - if type(h5_obj_or_filepath) is str: + if isinstance(h5_obj_or_filepath, str): h5_obj = h5py.File(h5_obj_or_filepath, "r") else: h5_obj = h5_obj_or_filepath @@ -1238,7 +1238,7 @@ def h5_file_size(h5_obj_or_filepath): "the size of hdf5 files." ), ) - if type(h5_obj_or_filepath) is str: + if isinstance(h5_obj_or_filepath, str): h5_obj = h5py.File(h5_obj_or_filepath, "r") else: h5_obj = h5_obj_or_filepath @@ -1280,7 +1280,7 @@ def shuffle_h5_file(h5_obj_or_filepath, seed_value=0): ) if seed_value is None: seed_value = random.randint(0, 1000) - if type(h5_obj_or_filepath) is str: + if isinstance(h5_obj_or_filepath, str): h5_obj = h5py.File(h5_obj_or_filepath, "a") else: h5_obj = h5_obj_or_filepath @@ -1997,7 +1997,7 @@ def cont_to_disk_as_hdf5( "containers to disk as hdf5 files." ), ) - if type(h5_obj_or_filepath) is str: + if isinstance(h5_obj_or_filepath, str): h5_obj = h5py.File(h5_obj_or_filepath, mode) else: h5_obj = h5_obj_or_filepath diff --git a/ivy/data_classes/factorized_tensor/cp_tensor.py b/ivy/data_classes/factorized_tensor/cp_tensor.py index 7798966181f95..e81c18f929f6e 100644 --- a/ivy/data_classes/factorized_tensor/cp_tensor.py +++ b/ivy/data_classes/factorized_tensor/cp_tensor.py @@ -408,11 +408,11 @@ def cp_lstsq_grad(cp_tensor, tensor, return_loss=False, mask=None): .. math:: - \nabla 0.5 ||\\mathcal{X} - [\\mathbf{w}; \\mathbf{A}, \\mathbf{B}, \\mathbf{C}]||^2 # noqa + \nabla 0.5 ||\\mathcal{X} - [\\mathbf{w}; \\mathbf{A}, \\mathbf{B}, \\mathbf{C}]||^2 where :math:`[\\mathbf{w}; \\mathbf{A}, \\mathbf{B}, \\mathbf{C}]` is the CP decomposition with weights - :math:`\\mathbf{w}` and factor matrices :math:`\\mathbf{A}`, :math:`\\mathbf{B}` and :math:`\\mathbf{C}`. # noqa + :math:`\\mathbf{w}` and factor matrices :math:`\\mathbf{A}`, :math:`\\mathbf{B}` and :math:`\\mathbf{C}`. Note that this does not return the gradient with respect to the weights even if CP is normalized. @@ -444,7 +444,7 @@ def cp_lstsq_grad(cp_tensor, tensor, return_loss=False, mask=None): loss : float Scalar quantity of the loss function corresponding to cp_gradient. Only returned if return_loss = True. 
- """ + """ # noqa: E501 ivy.CPTensor.validate_cp_tensor(cp_tensor) _, factors = cp_tensor diff --git a/ivy/data_classes/factorized_tensor/parafac2_tensor.py b/ivy/data_classes/factorized_tensor/parafac2_tensor.py index 391467f6fb15f..78d07a91ff6ce 100644 --- a/ivy/data_classes/factorized_tensor/parafac2_tensor.py +++ b/ivy/data_classes/factorized_tensor/parafac2_tensor.py @@ -419,7 +419,7 @@ def parafac2_to_slices(parafac2_tensor, validate=True): weights = None decomposition = weights, (A, B, C), projections - I, _ = A.shape + I, _ = A.shape # noqa: E741 return [ ivy.Parafac2Tensor.parafac2_to_slice(decomposition, i, validate=False) for i in range(I) diff --git a/ivy/data_classes/factorized_tensor/tt_tensor.py b/ivy/data_classes/factorized_tensor/tt_tensor.py index 783fa8e6d6f15..c226155527441 100644 --- a/ivy/data_classes/factorized_tensor/tt_tensor.py +++ b/ivy/data_classes/factorized_tensor/tt_tensor.py @@ -108,7 +108,7 @@ def validate_tt_tensor(tt_tensor): def tt_to_tensor(factors): """Return the full tensor whose TT decomposition is given by 'factors'. - Re-assembles 'factors', which represent a tensor in TT/Matrix-Product-State format # noqa: E501 + Re-assembles 'factors', which represent a tensor in TT/Matrix-Product-State format into the corresponding full tensor Parameters @@ -120,7 +120,7 @@ def tt_to_tensor(factors): ------- output_tensor tensor whose TT/MPS decomposition was given by 'factors' - """ + """ # noqa: E501 if isinstance(factors, (float, int)): return factors @@ -213,8 +213,8 @@ def validate_tt_rank( shape of the tensor to decompose rank way to determine the rank, by default 'same' - if 'same': rank is computed to keep the number of parameters (at most) the same # noqa: E501 - if float, computes a rank so as to keep rank percent of the original number of parameters # noqa: E501 + if 'same': rank is computed to keep the number of parameters (at most) the same + if float, computes a rank so as to keep rank percent of the original number of parameters if int or tuple, just returns rank constant_rank if True, the *same* rank will be chosen for each modes @@ -233,7 +233,7 @@ def validate_tt_rank( ------- rank rank of the decomposition - """ + """ # noqa: E501 if rounding == "ceil": rounding_fn = ivy.ceil elif rounding == "floor": diff --git a/ivy/functional/backends/numpy/manipulation.py b/ivy/functional/backends/numpy/manipulation.py index 0f59794f34ad9..9845a2d2061e5 100644 --- a/ivy/functional/backends/numpy/manipulation.py +++ b/ivy/functional/backends/numpy/manipulation.py @@ -70,7 +70,7 @@ def flip( return x if axis is None: axis = list(range(num_dims)) - if type(axis) is int: + if isinstance(axis, int): axis = [axis] axis = [item + num_dims if item < 0 else item for item in axis] return np.flip(x, axis) diff --git a/ivy/functional/backends/tensorflow/manipulation.py b/ivy/functional/backends/tensorflow/manipulation.py index 9ef86b5bc16bb..e2d6f8f13a2be 100644 --- a/ivy/functional/backends/tensorflow/manipulation.py +++ b/ivy/functional/backends/tensorflow/manipulation.py @@ -85,7 +85,7 @@ def flip( new_axis = list(range(num_dims)) else: new_axis = axis - if type(new_axis) is int: + if isinstance(new_axis, int): new_axis = [new_axis] else: new_axis = new_axis diff --git a/ivy/functional/frontends/jax/numpy/manipulations.py b/ivy/functional/frontends/jax/numpy/manipulations.py index f3f3c1721c546..61fe0fe6279ff 100644 --- a/ivy/functional/frontends/jax/numpy/manipulations.py +++ b/ivy/functional/frontends/jax/numpy/manipulations.py @@ -331,7 +331,7 @@ def 
transpose(a, axes=None): return a if not axes: axes = list(range(len(a.shape)))[::-1] - if type(axes) is int: + if isinstance(axes, int): axes = [axes] if (len(a.shape) == 0 and not axes) or (len(a.shape) == 1 and axes[0] == 0): return a diff --git a/ivy/functional/frontends/numpy/manipulation_routines/transpose_like_operations.py b/ivy/functional/frontends/numpy/manipulation_routines/transpose_like_operations.py index 696e7bb89c673..ca35eba5eeb30 100644 --- a/ivy/functional/frontends/numpy/manipulation_routines/transpose_like_operations.py +++ b/ivy/functional/frontends/numpy/manipulation_routines/transpose_like_operations.py @@ -32,7 +32,7 @@ def swapaxes(a, axis1, axis2): def transpose(array, /, *, axes=None): if not axes: axes = list(range(len(array.shape)))[::-1] - if type(axes) is int: + if isinstance(axes, int): axes = [axes] if (len(array.shape) == 0 and not axes) or (len(array.shape) == 1 and axes[0] == 0): return array diff --git a/ivy/functional/ivy/elementwise.py b/ivy/functional/ivy/elementwise.py index b4bbe720a143d..7997380dec22b 100644 --- a/ivy/functional/ivy/elementwise.py +++ b/ivy/functional/ivy/elementwise.py @@ -121,7 +121,7 @@ def abs( a: ivy.array([0., 2.6, 3.5]), b: ivy.array([4.5, 5.3, 0., 2.3]) } - """ + """ # noqa: E501 return ivy.current_backend(x).abs(x, out=out) @@ -6109,7 +6109,7 @@ def sqrt( b: ivy.array([[7., 1.], [0., 4.47]]) } - """ + """ # noqa: E501 return ivy.current_backend(x).sqrt(x, out=out) diff --git a/ivy/functional/ivy/experimental/layers.py b/ivy/functional/ivy/experimental/layers.py index f860e166c8d75..c356149a01f3e 100644 --- a/ivy/functional/ivy/experimental/layers.py +++ b/ivy/functional/ivy/experimental/layers.py @@ -92,7 +92,7 @@ def max_pool1d( [[16., 17., 18., 19.]]]) >>> x = ivy.arange(0, 24.).reshape((2, 3, 4)) - >>> print(ivy.max_pool1d(x, 2, 2, [(1,0)], data_format="NCW", dilation=2, ceil_mode=True)) # noqa + >>> print(ivy.max_pool1d(x, 2, 2, [(1,0)], data_format="NCW", dilation=2, ceil_mode=True)) ivy.array([[[ 1., 3.], [ 5., 7.], [ 9., 11.]], @@ -100,7 +100,7 @@ def max_pool1d( [[13., 15.], [17., 19.], [21., 23.]]]) - """ + """ # noqa: E501 return ivy.current_backend(x).max_pool1d( x, kernel, diff --git a/ivy/functional/ivy/experimental/linear_algebra.py b/ivy/functional/ivy/experimental/linear_algebra.py index 92e92992e53af..dd0685bead55b 100644 --- a/ivy/functional/ivy/experimental/linear_algebra.py +++ b/ivy/functional/ivy/experimental/linear_algebra.py @@ -922,8 +922,8 @@ def multi_mode_dot( Notes ----- If no modes are specified, just assumes there is one matrix or vector per mode and returns: - :math:`\\text{x }\\times_0 \\text{ matrix or vec list[0] }\\times_1 \\cdots \\times_n \\text{ matrix or vec list[n] }` # noqa - """ + :math:`\\text{x }\\times_0 \\text{ matrix or vec list[0] }\\times_1 \\cdots \\times_n \\text{ matrix or vec list[n] }` + """ # noqa: E501 if modes is None: modes = range(len(mat_or_vec_list)) diff --git a/ivy/functional/ivy/losses.py b/ivy/functional/ivy/losses.py index 56bce17a0e5cc..8310df572f06c 100644 --- a/ivy/functional/ivy/losses.py +++ b/ivy/functional/ivy/losses.py @@ -159,7 +159,7 @@ def binary_cross_entropy( >>> x = ivy.array([[0, 1, 1, 0]]) >>> y = ivy.array([[2.6, 6.2, 3.7, 5.3]]) >>> pos_weight = ivy.array([1, 2, 3, 4]) - >>> z = ivy.binary_cross_entropy(x, y, pos_weight=pos_weight, from_logits=True, reduction='sum', axis=1) # noqa: E501 + >>> z = ivy.binary_cross_entropy(x, y, pos_weight=pos_weight, from_logits=True, reduction='sum', axis=1) ivy.array([8.05393649]) >>> x = ivy.array([[0, 
1, 1, 0]]) @@ -216,7 +216,7 @@ def binary_cross_entropy( >>> z = ivy.binary_cross_entropy(x, y) >>> print(z) ivy.array([0.223, 0.223, 0.223, 0.223]) - """ + """ # noqa: E501 ivy.utils.assertions.check_elem_in_list(reduction, ["none", "sum", "mean"]) if not (0.0 <= epsilon <= 1.0): diff --git a/ivy/utils/backend/handler.py b/ivy/utils/backend/handler.py index 702766db6cb79..282d5beb047dc 100644 --- a/ivy/utils/backend/handler.py +++ b/ivy/utils/backend/handler.py @@ -96,7 +96,7 @@ def _determine_backend_from_args(args): >>> x = jnp.array([1]) >>> print(_determine_backend_from_args(x)) # noqa - """ + """ # noqa: E501 arg_type = type(args) if isinstance(args, ivy.Array): args = args.data @@ -175,7 +175,7 @@ def current_backend(*args, **kwargs): >>> x = np.array([2.0]) >>> print(ivy.current_backend(x)) # noqa - """ + """ # noqa: E501 global implicit_backend # if a global backend has been set with # set_backend then this will be returned diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py index 26547bb45a8f6..019bc40ada839 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py @@ -569,7 +569,7 @@ def test_paddle_eig( ret = [ivy.to_numpy(x).astype("float64") for x in ret] frontend_ret = [np.asarray(x, dtype=np.float64) for x in frontend_ret] - l, v = ret + l, v = ret # noqa: E741 front_l, front_v = frontend_ret assert_all_close( @@ -619,7 +619,7 @@ def test_paddle_eigh( ret = [ivy.to_numpy(x).astype("float64") for x in ret] frontend_ret = [np.asarray(x, dtype=np.float64) for x in frontend_ret] - l, v = ret + l, v = ret # noqa: E741 front_l, front_v = frontend_ret assert_all_close( diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py index 4923c2051c28b..3c8a75dbe9702 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py @@ -458,7 +458,7 @@ def test_torch_eig( ret = [ivy.to_numpy(x).astype("float64") for x in ret] frontend_ret = [np.asarray(x, dtype=np.float64) for x in frontend_ret] - l, v = ret + l, v = ret # noqa: E741 front_l, front_v = frontend_ret assert_all_close( diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_device.py b/ivy_tests/test_ivy/test_functional/test_core/test_device.py index 8145e909d85ad..05300aa6cd446 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_device.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_device.py @@ -372,7 +372,7 @@ def test_num_cpu_cores(backend_fw): # using multiprocessing module too because ivy uses psutil as basis. 
p_cpu_cores = psutil.cpu_count() m_cpu_cores = multiprocessing.cpu_count() - assert type(ivy_backend.num_cpu_cores()) == int + assert isinstance(ivy_backend.num_cpu_cores(), int) assert ivy_backend.num_cpu_cores() == p_cpu_cores assert ivy_backend.num_cpu_cores() == m_cpu_cores diff --git a/pyproject.toml b/pyproject.toml index 23f6dc2c85e4a..deb45218b37d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,12 +6,41 @@ requires = [ ] build-backend = "setuptools.build_meta" + +[tool.docformatter] +wrap-summaries = 88 +pre-summary-newline = true + + +[tool.autoflake] +in-place = true +remove-all-unused-imports = true +ignore-init-module-imports = true +remove-duplicate-keys = true +remove-unused-variables = true +quiet = true +ignore-pass-after-docstring = true +exclude = ["__init__.py"] + + [tool.ruff] +line-length = 88 target-version = "py38" -select = ["D"] # Enabling Only pydocstyle checks as of now (https://docs.astral.sh/ruff/rules/#pydocstyle-d) +select = [ + # pyflakes + "F", + # pycodestyle + "E", "W", + # pydocstyle + "D" +] + ignore = [ + "E203", # Whitespace-before-punctuation. + "E402", # Module-import-not-at-top-of-file. + "E731", # Do not assign a lambda expression, use a def. "D100", # Missing docstring in public module. "D101", # Missing docstring in public class. "D102", # Missing docstring in public method. @@ -32,6 +61,7 @@ ignore = [ "D417", # Missing argument description in the docstring for argument "X". ] -exclude = [ -'ivy/functional/(frontends|backends)/(?!.*/func_wrapper\.py$).*(?!__init__\.py$)', -] +[tool.ruff.per-file-ignores] +'ivy/functional/(frontends|backends)/(?!.*/func_wrapper\.py$).*(?!__init__\.py$)' = ["D"] +"**/__init__.py" = ["F401","F403","F405","F811","F821", "E501"] +"ivy/functional/frontends/paddle/**" = ["F401", "F403", "F405"] diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 5f868b342a912..0000000000000 --- a/setup.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[flake8] -max-line-length = 88 -ignore = E203, E402, E731, E704, W503, W504, W291, W293 - -per-file-ignores = - **/__init__.py: F401,F403,F405,F811,F821 - ivy/functional/frontends/paddle/**: F401,F403,F405 - -[autoflake] -in-place = true -remove-all-unused-imports = true -ignore-init-module-imports = true -remove-duplicate-keys = true -remove-unused-variables = true -quiet = true -ignore-pass-after-docstring = true -exclude = __init__.py - -[pydocstyle] -convention = numpy -add-ignore = D100,D101,D102,D103,D104,D105,D106,D107,D400,D205 - -[docformatter] -wrap-summaries = 88 -pre-summary-newline = true
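
As a quick local check of the migrated configuration, the ruff settings merged into pyproject.toml can be read back and compared against the codes the old [flake8] section ignored (E203, E402, E731 carry over; W503/W504 have no ruff equivalent). This is a minimal sketch, not part of the patch itself, assuming `tomli` is installed for Python < 3.11 (the standard-library `tomllib` covers 3.11+):

import sys

# Pick a TOML reader: stdlib tomllib on 3.11+, otherwise the tomli backport.
if sys.version_info >= (3, 11):
    import tomllib as toml_reader
else:
    import tomli as toml_reader

with open("pyproject.toml", "rb") as f:
    config = toml_reader.load(f)

ruff_cfg = config["tool"]["ruff"]

# Codes the deleted setup.cfg [flake8] section ignored and that the new
# [tool.ruff] table is expected to keep ignoring.
expected_ignores = {"E203", "E402", "E731"}
missing = expected_ignores - set(ruff_cfg.get("ignore", []))

print("line length:", ruff_cfg.get("line-length"))
print("missing ignores:", missing or "none")

Running this from the repository root should report a line length of 88 and no missing ignore codes if the merge preserved the old flake8 behaviour.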