[pre-commit.ci] pre-commit suggestions (#2136)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jirka <[email protected]>
pre-commit-ci[bot] and Borda authored Oct 3, 2023
1 parent 64e2d19 commit 2387f2a
Showing 24 changed files with 55 additions and 58 deletions.
2 changes: 1 addition & 1 deletion .github/assistant.py
@@ -79,7 +79,7 @@ def set_min_torch_by_python(fpath: str = "requirements/base.txt") -> None:
return
with open(fpath) as fp:
reqs = parse_requirements(fp.readlines())
- pkg_ver = [p for p in reqs if p.name == "torch"][0]
+ pkg_ver = next(p for p in reqs if p.name == "torch")
pt_ver = min([LooseVersion(v[1]) for v in pkg_ver.specs])
pt_ver = max(LooseVersion(LUT_PYTHON_TORCH[py_ver]), pt_ver)
with open(fpath) as fp:
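A side note on the `next(...)` idiom adopted above: the old form builds the entire filtered list only to take element zero, while `next` on a generator stops at the first match. A minimal runnable sketch (the `Req` tuple and sample data are hypothetical, not from this repository):

```python
from collections import namedtuple

# Hypothetical stand-in for a parsed requirement entry.
Req = namedtuple("Req", ["name", "specs"])
reqs = [Req("numpy", ">=1.20"), Req("torch", ">=1.8"), Req("scipy", ">=1.5")]

# Old style: materializes the whole filtered list, then indexes it.
pkg_ver = [p for p in reqs if p.name == "torch"][0]

# New style: lazily yields the first match and stops early.
# Raises StopIteration if nothing matches, just as the old form raises IndexError.
pkg_ver = next(p for p in reqs if p.name == "torch")

# Passing a default avoids the exception when no requirement matches.
pkg_ver = next((p for p in reqs if p.name == "torch"), None)
print(pkg_ver)
```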
16 changes: 8 additions & 8 deletions .pre-commit-config.yaml
@@ -38,22 +38,22 @@ repos:
- id: detect-private-key

- repo: https://github.com/asottile/pyupgrade
- rev: v3.9.0
+ rev: v3.14.0
hooks:
- id: pyupgrade
- args: [--py38-plus]
+ args: ["--py38-plus"]
name: Upgrade code

- repo: https://github.com/codespell-project/codespell
- rev: v2.2.5
+ rev: v2.2.6
hooks:
- id: codespell
additional_dependencies: [tomli]
- #args: ["--write-changes"]
+ args: ["--write-changes"]
exclude: pyproject.toml

- repo: https://github.com/crate-ci/typos
- rev: v1.16.12
+ rev: v1.16.17
hooks:
- id: typos
# empty to do not write fixes
@@ -68,13 +68,13 @@ repos:
args: ["--in-place"]

- repo: https://github.com/psf/black
- rev: 23.7.0
+ rev: 23.9.1
hooks:
- id: black
name: Format code

- repo: https://github.com/executablebooks/mdformat
- rev: 0.7.16
+ rev: 0.7.17
hooks:
- id: mdformat
additional_dependencies:
@@ -130,7 +130,7 @@ repos:
- id: text-unicode-replacement-char

- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.277
+ rev: v0.0.292
hooks:
- id: ruff
args: ["--fix"]
5 changes: 3 additions & 2 deletions pyproject.toml
@@ -22,10 +22,8 @@ addopts = [
#filterwarnings = ["error::FutureWarning"] # ToDo
xfail_strict = true
junit_duration_report = "call"
-
[tool.coverage.report]
exclude_lines = ["pragma: no cover", "pass"]
-
[tool.coverage.run]
parallel = true
concurrency = "thread"
@@ -81,6 +79,7 @@ wil = "wil"


[tool.ruff]
+ target-version = "py38"
line-length = 120
# Enable Pyflakes `E` and `F` codes by default.
select = [
@@ -122,6 +121,8 @@ ignore = [
"S301", # todo: `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue # todo
"S310", # todo: Audit URL open for permitted schemes. Allowing use of `file:` or custom schemes is often unexpected. # todo
"B905", # todo: `zip()` without an explicit `strict=` parameter
"PYI024", # todo: Use `typing.NamedTuple` instead of `collections.namedtuple`
"PYI041", # todo: Use `float` instead of `int | float``
]
# Exclude a variety of commonly ignored directories.
exclude = [
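The two new `ignore` entries defer ruff's flake8-pyi findings rather than fixing them. For orientation, this is roughly the rewrite PYI024 will ask for once it is addressed; a hedged sketch, not code from this commit:

```python
from collections import namedtuple
from typing import NamedTuple

# Flagged by PYI024: fields carry no types that a static checker can see.
Point = namedtuple("Point", ["x", "y"])

# Preferred: typed fields with ordinary class syntax (defaults also allowed).
class TypedPoint(NamedTuple):
    x: float
    y: float

p = TypedPoint(1.0, 2.0)
print(p.x + p.y)
```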
12 changes: 6 additions & 6 deletions src/torchmetrics/audio/__init__.py
@@ -41,16 +41,16 @@
]

if _PESQ_AVAILABLE:
- from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality # noqa: F401
+ from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality

__all__.append("PerceptualEvaluationSpeechQuality")
__all__ += ["PerceptualEvaluationSpeechQuality"]

if _PYSTOI_AVAILABLE:
- from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility # noqa: F401
+ from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility

__all__.append("ShortTimeObjectiveIntelligibility")
__all__ += ["ShortTimeObjectiveIntelligibility"]

if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
- from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio # noqa: F401
+ from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio

__all__.append("SpeechReverberationModulationEnergyRatio")
__all__ += ["SpeechReverberationModulationEnergyRatio"]
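Both edits in this file are cosmetic: once a name is listed in `__all__`, linters treat the import as an intentional re-export and the `# noqa: F401` pragmas become redundant, and `__all__ += [...]` extends naturally to several names. A minimal sketch of the guarded-export pattern for an optional dependency (the module names here are illustrative):

```python
# Sketch of an __init__.py that conditionally re-exports an optional name.
import importlib.util

__all__ = ["always_here"]

def always_here() -> str:
    return "core feature"

# Only export the extra name when the optional package is installed.
if importlib.util.find_spec("numpy") is not None:
    # Listed in __all__ below, so no `# noqa: F401` is needed on the import.
    from numpy import ndarray

    __all__ += ["ndarray"]
```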
2 changes: 1 addition & 1 deletion src/torchmetrics/audio/stoi.py
@@ -34,7 +34,7 @@ class ShortTimeObjectiveIntelligibility(Metric):
The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good
alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are
interested in the effect of nonlinear processing to noisy speech, e.g., noise reduction, binary masking algorithms,
- on speech intelligibility. Description taken from `Cees Taal's website`_ and for further defails see `STOI ref1`_
+ on speech intelligibility. Description taken from `Cees Taal's website`_ and for further details see `STOI ref1`_
and `STOI ref2`_.
This metric is a wrapper for the `pystoi package`_. As the implementation backend implementation only supports
4 changes: 2 additions & 2 deletions src/torchmetrics/detection/mean_ap.py
@@ -879,7 +879,7 @@ def _get_coco_format(
f"Invalid input box of sample {image_id}, element {k} (expected 4 values, got {len(image_box)})"
)

- if type(image_label) != int:
+ if not isinstance(image_label, int):
raise ValueError(
f"Invalid input class of sample {image_id}, element {k}"
f" (expected value of type integer, got type {type(image_label)})"
@@ -915,7 +915,7 @@ def _get_coco_format(

if scores is not None:
score = scores[image_id][k].cpu().tolist()
- if type(score) != float:
+ if not isinstance(score, float):
raise ValueError(
f"Invalid input score of sample {image_id}, element {k}"
f" (expected value of type float, got type {type(score)})"
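Worth noting that the `isinstance` rewrite is not purely cosmetic: unlike an exact `type(...) != int` comparison, `isinstance` accepts subclasses, and in Python `bool` subclasses `int`. A small illustration of where the two checks disagree:

```python
label = True  # bool is a subclass of int

print(type(label) == int)      # False: exact-type comparison rejects subclasses
print(isinstance(label, int))  # True: isinstance is subclass-aware

# Practical upshot of the change above: the new validation accepts int
# subclasses (such as bool) that the old check rejected; for plain ints
# and floats the two checks agree.
```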
12 changes: 6 additions & 6 deletions src/torchmetrics/functional/audio/__init__.py
@@ -42,16 +42,16 @@
]

if _PESQ_AVAILABLE:
- from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality # noqa: F401
+ from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality

__all__.append("perceptual_evaluation_speech_quality")
__all__ += ["perceptual_evaluation_speech_quality"]

if _PYSTOI_AVAILABLE:
- from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility # noqa: F401
+ from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility

__all__.append("short_time_objective_intelligibility")
__all__ += ["short_time_objective_intelligibility"]

if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
- from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio # noqa: F401
+ from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio

__all__.append("speech_reverberation_modulation_energy_ratio")
__all__ += ["speech_reverberation_modulation_energy_ratio"]
4 changes: 2 additions & 2 deletions src/torchmetrics/functional/audio/sdr.py
@@ -63,7 +63,7 @@ def _symmetric_toeplitz(vector: Tensor) -> Tensor:
def _compute_autocorr_crosscorr(target: Tensor, preds: Tensor, corr_len: int) -> Tuple[Tensor, Tensor]:
r"""Compute the auto correlation of `target` and the cross correlation of `target` and `preds`.
- This calculation is done using the fast Fourier transform (FFT). Let's denotes the symmetric Toeplitz matric of the
+ This calculation is done using the fast Fourier transform (FFT). Let's denotes the symmetric Toeplitz metric of the
auto correlation of `target` as `R`, the cross correlation as 'b', then solving the equation `Rh=b` could have `h`
as the coordinate of `preds` in the column space of the `corr_len` shifts of `target`.
@@ -81,7 +81,7 @@ def _compute_autocorr_crosscorr(target: Tensor, preds: Tensor, corr_len: int) ->
n_fft = 2 ** math.ceil(math.log2(preds.shape[-1] + target.shape[-1] - 1))

# computes the auto correlation of `target`
- # r_0 is the first row of the symmetric Toeplitz matric
+ # r_0 is the first row of the symmetric Toeplitz metric
t_fft = torch.fft.rfft(target, n=n_fft, dim=-1)
r_0 = torch.fft.irfft(t_fft.real**2 + t_fft.imag**2, n=n_fft)[..., :corr_len]

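Context for the code around this typo fix: the autocorrelation is computed in the frequency domain. By the Wiener-Khinchin relation, the inverse FFT of the power spectrum `|FFT(x)|^2` is the circular autocorrelation of `x`, and zero-padding to `n_fft >= 2*len(x) - 1` makes it equal the linear autocorrelation. A standalone check of that identity, independent of the torchmetrics internals:

```python
import math

import torch

x = torch.randn(128)
corr_len = 16

# Pad so that circular correlation coincides with linear correlation.
n_fft = 2 ** math.ceil(math.log2(2 * x.shape[-1] - 1))

# FFT route: irfft of the power spectrum, keeping the first corr_len lags.
x_fft = torch.fft.rfft(x, n=n_fft)
r = torch.fft.irfft(x_fft.real**2 + x_fft.imag**2, n=n_fft)[:corr_len]

# Direct route: r[k] = sum_n x[n] * x[n + k], for comparison.
r_direct = torch.stack([(x[: x.shape[-1] - k] * x[k:]).sum() for k in range(corr_len)])

print(torch.allclose(r, r_direct, atol=1e-3))  # expected: True
```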
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/audio/stoi.py
@@ -35,7 +35,7 @@ def short_time_objective_intelligibility(
STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good alternative
to the speech intelligibility index (SII) or the speech transmission index (STI), when you are interested in
the effect of nonlinear processing to noisy speech, e.g., noise reduction, binary masking algorithms, on speech
- intelligibility. Description taken from `Cees Taal's website`_ and for further defails see `STOI ref1`_ and
+ intelligibility. Description taken from `Cees Taal's website`_ and for further details see `STOI ref1`_ and
`STOI ref2`_.
This metric is a wrapper for the `pystoi package`_. As the implementation backend implementation only supports
14 changes: 6 additions & 8 deletions src/torchmetrics/functional/detection/__init__.py
@@ -22,15 +22,13 @@
__all__ = ["modified_panoptic_quality", "panoptic_quality"]

if _TORCHVISION_AVAILABLE and _TORCHVISION_GREATER_EQUAL_0_8:
- from torchmetrics.functional.detection.giou import generalized_intersection_over_union # noqa: F401
- from torchmetrics.functional.detection.iou import intersection_over_union # noqa: F401
+ from torchmetrics.functional.detection.giou import generalized_intersection_over_union
+ from torchmetrics.functional.detection.iou import intersection_over_union

__all__.append("generalized_intersection_over_union")
__all__.append("intersection_over_union")
__all__ += ["generalized_intersection_over_union", "intersection_over_union"]

if _TORCHVISION_AVAILABLE and _TORCHVISION_GREATER_EQUAL_0_13:
- from torchmetrics.functional.detection.ciou import complete_intersection_over_union # noqa: F401
- from torchmetrics.functional.detection.diou import distance_intersection_over_union # noqa: F401
+ from torchmetrics.functional.detection.ciou import complete_intersection_over_union
+ from torchmetrics.functional.detection.diou import distance_intersection_over_union

__all__.append("complete_intersection_over_union")
__all__.append("distance_intersection_over_union")
__all__ += ["complete_intersection_over_union", "distance_intersection_over_union"]
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/image/ssim.py
@@ -479,7 +479,7 @@ def multiscale_structural_similarity_index_measure(
the range is calculated as the difference and input is clamped between the values.
k1: Parameter of structural similarity index measure.
k2: Parameter of structural similarity index measure.
- betas: Exponent parameters for individual similarities and contrastive sensitivies returned by different image
+ betas: Exponent parameters for individual similarities and contrastive sensitivities returned by different image
resolutions.
normalize: When MultiScaleSSIM loss is used for training, it is desirable to use normalizes to improve the
training stability. This `normalize` argument is out of scope of the original implementation [1], and it is
7 changes: 3 additions & 4 deletions src/torchmetrics/functional/text/__init__.py
@@ -47,8 +47,7 @@


if _TRANSFORMERS_GREATER_EQUAL_4_4:
- from torchmetrics.functional.text.bert import bert_score # noqa: F401
- from torchmetrics.functional.text.infolm import infolm # noqa: F401
+ from torchmetrics.functional.text.bert import bert_score
+ from torchmetrics.functional.text.infolm import infolm

__all__.append("bert_score")
__all__.append("infolm")
__all__ += ["bert_score", "infolm"]
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/text/chrf.py
@@ -268,7 +268,7 @@ def _calculate_fscore(
beta: A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
Return:
- A chrF/chrF++ score. This function is universal both for sentence-level and corpus-level calucation.
+ A chrF/chrF++ score. This function is universal both for sentence-level and corpus-level calculation.
"""

4 changes: 2 additions & 2 deletions src/torchmetrics/functional/text/eed.py
@@ -145,7 +145,7 @@ def _eed_function(
next_row = [inf] * (len(hyp) + 1)

for w in range(1, len(ref) + 1):
- for i in range(0, len(hyp) + 1):
+ for i in range(len(hyp) + 1):
if i > 0:
next_row[i] = min(
next_row[i - 1] + deletion,
@@ -252,7 +252,7 @@ def _eed_compute(sentence_level_scores: List[Tensor]) -> Tensor:
def _preprocess_sentences(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
- language: Union[Literal["en"], Literal["ja"]],
+ language: Literal["en", "ja"],
) -> Tuple[Union[str, Sequence[str]], Sequence[Union[str, Sequence[str]]]]:
"""Preprocess strings according to language requirements.
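On the `Literal` change: `Literal` already unions its arguments, so `Union[Literal["en"], Literal["ja"]]` and `Literal["en", "ja"]` denote the same type, and the flat form is the canonical spelling. A brief illustration:

```python
from typing import Literal, Union

# These two annotations describe exactly the same set of values.
LangVerbose = Union[Literal["en"], Literal["ja"]]
Lang = Literal["en", "ja"]

def preprocess(text: str, language: Lang) -> str:
    # Runtime guard mirroring the static constraint.
    if language not in ("en", "ja"):
        raise ValueError(f"Expected 'en' or 'ja', got {language!r}")
    return text.lower() if language == "en" else text

print(preprocess("Hello World", "en"))
```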
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/text/helper.py
@@ -242,7 +242,7 @@ def _add_cache(self, prediction_tokens: List[str], edit_distance: List[List[Tupl
node = value[0] # type: ignore

def _find_cache(self, prediction_tokens: List[str]) -> Tuple[int, List[List[Tuple[int, _EditOperations]]]]:
"""Find the already calculated rows of the Levenshtein edit distance matric.
"""Find the already calculated rows of the Levenshtein edit distance metric.
Args:
prediction_tokens: A tokenized predicted sentence.
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/text/rouge.py
@@ -490,7 +490,7 @@ def rouge_score(
if not isinstance(rouge_keys, tuple):
rouge_keys = (rouge_keys,)
for key in rouge_keys:
- if key not in ALLOWED_ROUGE_KEYS.keys():
+ if key not in ALLOWED_ROUGE_KEYS:
raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {list(ALLOWED_ROUGE_KEYS.keys())}")
rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]

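`key in d` already tests against a dict's keys, so the explicit `.keys()` call adds nothing; ruff flags this as SIM118. The same cleanup recurs in `sacre_bleu.py` and `squad.py` below. A one-liner demonstration with a toy mapping:

```python
ALLOWED = {"rouge1": 1, "rouge2": 2, "rougeL": "L"}
key = "rouge2"

# Equivalent membership tests; the bare `in` is the idiomatic spelling.
assert (key in ALLOWED.keys()) == (key in ALLOWED)

if key not in ALLOWED:
    print(f"Got unknown rouge key {key!r}; expected one of {list(ALLOWED)}")
```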
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/text/sacre_bleu.py
@@ -333,7 +333,7 @@ def sacre_bleu_score(
if tokenize not in AVAILABLE_TOKENIZERS:
raise ValueError(f"Argument `tokenize` expected to be one of {AVAILABLE_TOKENIZERS} but got {tokenize}.")

- if tokenize not in _SacreBLEUTokenizer._TOKENIZE_FN.keys():
+ if tokenize not in _SacreBLEUTokenizer._TOKENIZE_FN:
raise ValueError(
f"Unsupported tokenizer selected. Please, choose one of {list(_SacreBLEUTokenizer._TOKENIZE_FN.keys())}"
)
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/text/squad.py
@@ -119,7 +119,7 @@ def _squad_input_check(
)

answers: Dict[str, Union[List[str], List[int]]] = target["answers"] # type: ignore[assignment]
if "text" not in answers.keys():
if "text" not in answers:
raise KeyError(
"Expected keys in a 'answers' are 'text'."
"Please make sure that 'answer' maps to a `SQuAD` format dictionary.\n"
2 changes: 1 addition & 1 deletion src/torchmetrics/image/ssim.py
@@ -249,7 +249,7 @@ class MultiScaleStructuralSimilarityIndexMeasure(Metric):
The ``data_range`` must be given when ``dim`` is not None.
k1: Parameter of structural similarity index measure.
k2: Parameter of structural similarity index measure.
- betas: Exponent parameters for individual similarities and contrastive sensitivies returned by different image
+ betas: Exponent parameters for individual similarities and contrastive sensitivities returned by different image
resolutions.
normalize: When MultiScaleStructuralSimilarityIndexMeasure loss is used for training, it is desirable to use
normalizes to improve the training stability. This `normalize` argument is out of scope of the original
2 changes: 1 addition & 1 deletion src/torchmetrics/retrieval/fall_out.py
@@ -40,7 +40,7 @@ class RetrievalFallOut(RetrievalMetric):
As output to ``forward`` and ``compute`` the metric returns the following output:
- - ``fo@k`` (:class:`~torch.Tensor`): A tensor with the computed metric
+ - ``fallout@k`` (:class:`~torch.Tensor`): A tensor with the computed metric
All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flatten at the beginning,
so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
7 changes: 3 additions & 4 deletions src/torchmetrics/text/__init__.py
@@ -45,8 +45,7 @@
]

if _TRANSFORMERS_GREATER_EQUAL_4_4:
- from torchmetrics.text.bert import BERTScore # noqa: F401
- from torchmetrics.text.infolm import InfoLM # noqa: F401
+ from torchmetrics.text.bert import BERTScore
+ from torchmetrics.text.infolm import InfoLM

__all__.append("BERTScore")
__all__.append("InfoLM")
__all__ += ["BERTScore", "InfoLM"]
2 changes: 1 addition & 1 deletion src/torchmetrics/wrappers/tracker.py
@@ -152,7 +152,7 @@ def compute_all(self) -> Any:
"""Compute the metric value for all tracked metrics.
Return:
- By default will try stacking the results from all increaments into a single tensor if the tracked base
+ By default will try stacking the results from all increments into a single tensor if the tracked base
object is a single metric. If a metric collection is provided a dict of stacked tensors will be returned.
If the stacking process fails a list of the computed results will be returned.
2 changes: 1 addition & 1 deletion tests/unittests/bases/test_composition.py
@@ -67,7 +67,7 @@ def test_metrics_add(second_operand, expected_result):

@pytest.mark.parametrize(
("second_operand", "expected_result"),
- [(DummyMetric(3), tensor(2)), (3, tensor(2)), (3, tensor(2)), (tensor(3), tensor(2))],
+ [(DummyMetric(3), tensor(2)), (3, tensor(2)), (tensor(3), tensor(2))],
)
def test_metrics_and(second_operand, expected_result):
"""Test that `and` operator works and returns a compositional metric."""
2 changes: 1 addition & 1 deletion tests/unittests/text/test_rouge.py
@@ -73,7 +73,7 @@ def _compute_rouge_score(
aggregator_avg = BootstrapAggregator()

if accumulate == "best":
- key_curr = list(list_results[0].keys())[0]
+ key_curr = next(iter(list_results[0].keys()))
all_fmeasure = torch.tensor([v[key_curr].fmeasure for v in list_results])
highest_idx = torch.argmax(all_fmeasure).item()
aggregator.add_scores(list_results[highest_idx])
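Same family of cleanup as the `next(...)` change in `.github/assistant.py` at the top of this commit: `next(iter(...))` takes the first key without building a list of all keys, and since dicts preserve insertion order the result is identical. A short sketch with hypothetical score names:

```python
scores = {"rouge1_fmeasure": 0.42, "rouge2_fmeasure": 0.31}

# Old: materializes every key just to take the first one.
first_key = list(scores.keys())[0]

# New: pulls a single key from the iterator and stops.
first_key = next(iter(scores))  # iter(d) iterates over keys directly

print(first_key)  # rouge1_fmeasure
```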
