Use ruff format to replace black | chore(lint) (#1153)
- Bumps [ruff](https://github.com/astral-sh/ruff) from 0.1.4 to 0.1.5.
- Replace `black` with `ruff format` because it runs much faster
(582.6ms vs 2.762s) and is a drop-in replacement for black (with slight
variation in formatting results): https://docs.astral.sh/ruff/formatter/

---------

Signed-off-by: dependabot[bot] <[email protected]>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Justin Chu <[email protected]>
3 people authored Nov 14, 2023
1 parent c08c00f commit 8907207
Showing 12 changed files with 32 additions and 38 deletions.
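The timing claim in the commit message (582.6ms vs 2.762s) is easy to sanity-check locally. A minimal sketch, assuming both formatters are installed; the commit does not say how its numbers were measured, so this is one plausible harness, not the authors' method:

```python
# Hedged sketch: one plausible way to reproduce the timing comparison;
# not the authors' actual methodology. Both commands rewrite files in
# place, so run this on a scratch checkout.
import subprocess
import time


def time_cmd(cmd: list[str]) -> float:
    """Run a command once and return wall-clock seconds."""
    start = time.perf_counter()
    subprocess.run(cmd, capture_output=True, check=False)
    return time.perf_counter() - start


print(f"ruff format: {time_cmd(['ruff', 'format', '.']):.3f}s")
print(f"black:       {time_cmd(['black', '.']):.3f}s")
```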
4 changes: 2 additions & 2 deletions .lintrunner.toml
@@ -70,7 +70,7 @@ init_command = [
]

[[linter]]
-code = 'BLACK-ISORT'
+code = 'RUFF-FORMAT'
include_patterns = [
'**/*.py',
]
@@ -82,7 +82,7 @@ command = [
'-m',
'lintrunner_adapters',
'run',
-'black_isort_linter',
+'ruff_format_linter',
'--',
'@{{PATHSFILE}}'
]
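The `command` list above is what lintrunner executes for each batch of files; the same adapter can be invoked by hand. A hedged sketch mirroring that command (the target file is illustrative):

```python
# Hedged sketch: run the ruff_format_linter adapter directly, mirroring
# the command list in .lintrunner.toml. The target file is illustrative.
import subprocess
import sys

subprocess.run(
    [
        sys.executable,
        "-m",
        "lintrunner_adapters",
        "run",
        "ruff_format_linter",
        "--",
        "onnxscript/converter.py",
    ],
    check=False,
)
```

In day-to-day use this is driven by `lintrunner` itself, which substitutes the changed-file list for `@{{PATHSFILE}}`.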
4 changes: 1 addition & 3 deletions docs/examples/04_plot_eager_mode_evaluation.py
@@ -18,9 +18,7 @@


@script()
-def linear(
-    A: FLOAT["N", "K"], W: FLOAT["K", "M"], Bias: FLOAT["M"]
-) -> FLOAT["N", "M"]:  # noqa: F821
+def linear(A: FLOAT["N", "K"], W: FLOAT["K", "M"], Bias: FLOAT["M"]) -> FLOAT["N", "M"]:  # noqa: F821
T1 = op.MatMul(A, W)
T2 = op.Add(T1, Bias)
Y = op.Relu(T2)
3 changes: 1 addition & 2 deletions onnxscript/_internal/param_manipulation.py
@@ -61,8 +61,7 @@ def separate_input_attributes_from_arguments(
else:
onnx_attributes[param.name] = kwargs[param.name]
elif (
-    param.is_attribute
-    and param.default is not values._EmptyDefault  # pylint: disable=protected-access
+    param.is_attribute and param.default is not values._EmptyDefault  # pylint: disable=protected-access
):
# User did not provide the attribute
if fill_defaults:
3 changes: 1 addition & 2 deletions onnxscript/converter.py
@@ -21,9 +21,8 @@
import onnx

import onnxscript
-from onnxscript import irbuilder, onnx_types, sourceinfo
+from onnxscript import irbuilder, onnx_types, sourceinfo, values
from onnxscript import type_annotation as ta
-from onnxscript import values
from onnxscript._internal import analysis, ast_utils, autocast, param_manipulation

PY_VERSION_GE_39 = ast_utils.PY_VERSION_GE_39
@@ -12,11 +12,8 @@
import os
import re
import textwrap
-from pathlib import Path
from typing import Any, Dict, List, Sequence

-import black
-import isort
import torch
import torchgen.gen
import torchgen.model
@@ -319,15 +316,6 @@ def main(args: argparse.Namespace) -> None:
)
py_module.accept(cg.PythonWriter(f))

-    # Format the generated files so that they pass linting.
-    # line_length=95 is to match the lintrunner rules.
-    isort.file(output_path)
-    black.format_file_in_place(
-        Path(output_path),
-        fast=True,
-        mode=black.Mode(line_length=95),
-        write_back=black.WriteBack.YES,
-    )
print("Done.")


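Note the commit deletes the in-process `isort`/`black` formatting of the generated file without a direct replacement; generated code is now formatted like any other source, via lintrunner. If a generator wanted to keep an inline formatting step, a hedged ruff-based equivalent might look like this (not part of this commit; the output path is illustrative):

```python
# Hedged sketch of a ruff-based stand-in for the removed isort/black
# calls. The commit simply drops the step; it does not add this code.
import subprocess

output_path = "generated_module.py"  # illustrative

# Sort imports (replaces isort.file); rule "I" matches the selection
# this commit adds to pyproject.toml.
subprocess.run(["ruff", "check", "--select", "I", "--fix", output_path], check=False)
# Reformat (replaces black.format_file_in_place); the line length comes
# from pyproject.toml, which this commit sets to 95.
subprocess.run(["ruff", "format", output_path], check=False)
```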
2 changes: 1 addition & 1 deletion onnxscript/function_libs/torch_lib/graph_building.py
@@ -200,7 +200,7 @@ def symbolic_value(self) -> torch.Value:
def _unwrap_tensor_to_torch_value(
value: Union[
ValidArgumentType, Mapping[str, ValidArgumentType], Sequence[ValidArgumentType]
-    ]
+    ],
) -> Union[
ValidTorchValueType,
Dict[str, ValidTorchValueType],
17 changes: 12 additions & 5 deletions onnxscript/function_libs/torch_lib/ops/core.py
@@ -88,7 +88,9 @@ def aten__log_softmax_half(

@torch_op("aten::_log_softmax")
def aten__log_softmax(
-    self: TFloatHighPrecision, dim: int, half_to_float: bool  # pylint: disable=unused-argument
+    self: TFloatHighPrecision,
+    dim: int,
+    half_to_float: bool,  # pylint: disable=unused-argument
) -> TFloatHighPrecision:
"""_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor"""

@@ -1582,7 +1584,8 @@ def aten_clamp_min(self: TReal, min_: TReal) -> TReal:

@torch_op("aten::clone")
def aten_clone(
self: TTensor, memory_format: str = "" # pylint: disable=unused-argument
self: TTensor,
memory_format: str = "", # pylint: disable=unused-argument
) -> TTensor:
"""clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor"""

@@ -1717,7 +1720,8 @@ def aten_constant_pad_nd(self: TTensor, pad: INT64, value: float = 0.0) -> TTens

@torch_op("aten::contiguous")
def aten_contiguous(
self: TTensor, memory_format: str = "contiguous_format" # pylint: disable=unused-argument
self: TTensor,
memory_format: str = "contiguous_format", # pylint: disable=unused-argument
) -> TTensor:
"""contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)"""

@@ -2043,7 +2047,9 @@ def aten_convolution_overrideable(

@torch_op("aten::copy")
def aten_copy(
-    self: TTensor, src: TTensor, non_blocking: bool = False  # pylint: disable=unused-argument
+    self: TTensor,
+    src: TTensor,
+    non_blocking: bool = False,  # pylint: disable=unused-argument
) -> TTensor:
"""copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor"""

@@ -3145,7 +3151,8 @@ def aten_empty_quantized(

@torch_op("aten::empty_strided")
def aten_empty_strided(
-    size: INT64, stride: INT64  # pylint: disable=unused-argument
+    size: INT64,
+    stride: INT64,  # pylint: disable=unused-argument
) -> TTensor: # type: ignore[type-var]
# empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

3 changes: 2 additions & 1 deletion onnxscript/tests/function_libs/torch_lib/ops_test_data.py
@@ -1998,7 +1998,8 @@ def _where_input_wrangler(
"ops.aten.tensor.bool", core_ops.aten_tensor_bool
), # Custom from extra_opinfo
TorchLibOpInfo(
"ops.aten.tensor.float", core_ops.aten_tensor_float # Custom from extra_opinfo
"ops.aten.tensor.float",
core_ops.aten_tensor_float, # Custom from extra_opinfo
),
TorchLibOpInfo(
"ops.aten.tensor.int", core_ops.aten_tensor_int
4 changes: 1 addition & 3 deletions onnxscript/type_annotation.py
@@ -225,9 +225,7 @@ def pytype_to_type_strings(pytype: TypeAnnotationValue) -> list[str]:
if isinstance(pytype, typing.TypeVar):
constraints = pytype.__constraints__
if constraints:
-    return pytype_to_type_strings(
-        Union.__getitem__(constraints)
-    )  # pylint: disable=unnecessary-dunder-call
+    return pytype_to_type_strings(Union.__getitem__(constraints))  # pylint: disable=unnecessary-dunder-call
bound = pytype.__bound__
if bound is None:
return list(ALL_TENSOR_TYPE_STRINGS)
10 changes: 8 additions & 2 deletions onnxscript/values.py
@@ -10,8 +10,14 @@
import types
import typing
from enum import IntFlag
-from typing import _GenericAlias  # type: ignore[attr-defined]
-from typing import Any, ClassVar, Optional, Protocol, Sequence
+from typing import (  # type: ignore[attr-defined]
+    Any,
+    ClassVar,
+    Optional,
+    Protocol,
+    Sequence,
+    _GenericAlias,
+)

import onnx
import onnx.defs
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -112,6 +112,7 @@ select = [
"E", # pycodestyle
"F", # Pyflakes
"G", # flake8-logging-format
"I", # isort
"ISC", # flake8-implicit-str-concat
"N", # pep8-naming
"NPY", # modern numpy
@@ -143,7 +144,7 @@ ignore = [
"UP006", # keep-runtime-typing
"UP007", # keep-runtime-typing
]
-line-length = 120
+line-length = 95
ignore-init-module-imports = true

[tool.ruff.per-file-ignores]
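Selecting `"I"` and dropping to `line-length = 95` is what lets ruff absorb both of the retired tools: import sorting becomes a fixable lint rule, and the formatter matches the old black line length. A hedged sketch of the two commands that now cover the whole job:

```python
# Hedged sketch: the two ruff invocations that together replace
# isort + black under the pyproject.toml config above.
import subprocess

subprocess.run(["ruff", "check", "--select", "I", "--fix", "."], check=False)  # import sorting
subprocess.run(["ruff", "format", "."], check=False)  # code formatting
```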
5 changes: 1 addition & 4 deletions requirements/lintrunner/requirements.txt
@@ -1,13 +1,10 @@
# This file is auto updated by dependabot
lintrunner-adapters>=0.8.0
# RUFF, RUFF-FIX
-ruff==0.1.4
+ruff==0.1.5
# MYPY
mypy==1.6.1
types-PyYAML==6.0.12.11
-# BLACK-ISORT
-black==23.10.1
-isort==5.12.0
# PYLINT
pylint==2.17.6
# EDITORCONFIG-CHECKER
