From 7cd8bb931e451893201fbbbe4662d4d6ce9f4bb3 Mon Sep 17 00:00:00 2001
From: Jirka Borovec <6035284+Borda@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:29:55 +0200
Subject: [PATCH] check typos & fixing some... (#2102)

(cherry picked from commit 1ddacab0da6628adb15278e1b31bdd4ab721ae23)
---
 .github/workflows/ci-checks.yml               |  1 +
 .github/workflows/docs-build.yml              |  1 +
 .pre-commit-config.yaml                       |  9 +++++++++
 pyproject.toml                                | 19 +++++++++++++++++++
 src/torchmetrics/classification/accuracy.py   |  2 +-
 .../functional/regression/tweedie_deviance.py |  2 +-
 src/torchmetrics/functional/text/chrf.py      |  8 ++++----
 src/torchmetrics/functional/text/infolm.py    |  4 ++--
 src/torchmetrics/functional/text/ter.py       |  2 +-
 src/torchmetrics/functional/text/wil.py       | 10 +++++-----
 src/torchmetrics/image/fid.py                 |  6 +++---
 src/torchmetrics/text/wil.py                  |  6 +++---
 tests/unittests/bases/test_collections.py     |  2 +-
 .../pairwise/test_pairwise_distance.py        |  2 +-
 14 files changed, 52 insertions(+), 22 deletions(-)

diff --git a/.github/workflows/ci-checks.yml b/.github/workflows/ci-checks.yml
index 4beef4f4f3a..68c652ba50f 100644
--- a/.github/workflows/ci-checks.yml
+++ b/.github/workflows/ci-checks.yml
@@ -5,6 +5,7 @@ on:
     branches: [master, "release/*"]
   pull_request:
     branches: [master, "release/*"]
+    types: [opened, reopened, ready_for_review, synchronize]
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml
index fbd66c1d3af..ebd4c552bde 100644
--- a/.github/workflows/docs-build.yml
+++ b/.github/workflows/docs-build.yml
@@ -6,6 +6,7 @@ on:
     tags: ["*"]
   pull_request:
     branches: ["master", "release/*"]
+    types: [opened, reopened, ready_for_review, synchronize]
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 541bd782ae3..43de956fd86 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -50,6 +50,15 @@ repos:
       - id: codespell
         additional_dependencies: [tomli]
         #args: ["--write-changes"]
+        exclude: pyproject.toml
+
+  - repo: https://github.com/crate-ci/typos
+    rev: v1.16.12
+    hooks:
+      - id: typos
+        # keep args empty so typos does not write fixes
+        args: []
+        exclude: pyproject.toml
 
   - repo: https://github.com/PyCQA/docformatter
     rev: v1.7.5
diff --git a/pyproject.toml b/pyproject.toml
index 63e1a1f16e1..8fcbef0e92a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -60,6 +60,25 @@ ignore-words-list = """
   archiv
 """
 
+[tool.typos.default]
+extend-ignore-identifiers-re = [
+  # *sigh* this just isn't worth the cost of fixing
+  "AttributeID.*Supress.*",
+]
+
+[tool.typos.default.extend-identifiers]
+# *sigh* this just isn't worth the cost of fixing
+MAPE = "MAPE"
+WIL = "WIL"
+Raison = "Raison"
+
+[tool.typos.default.extend-words]
+# domain abbreviations, not typos; do not "correct" them
+fpr = "fpr"
+mape = "mape"
+wil = "wil"
+
+
 [tool.ruff]
 line-length = 120
 
diff --git a/src/torchmetrics/classification/accuracy.py b/src/torchmetrics/classification/accuracy.py
index 8f6278f4a39..60188aff5c9 100644
--- a/src/torchmetrics/classification/accuracy.py
+++ b/src/torchmetrics/classification/accuracy.py
@@ -45,7 +45,7 @@ class BinaryAccuracy(BinaryStatScores):
 
     As output to ``forward`` and ``compute`` the metric returns the following output:
 
-    - ``ba`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, metric returns a scalar value.
+    - ``acc`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, metric returns a scalar value.
       If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar
       value per sample.
 
diff --git a/src/torchmetrics/functional/regression/tweedie_deviance.py b/src/torchmetrics/functional/regression/tweedie_deviance.py
index 0365ec48d76..e3508dc04c7 100644
--- a/src/torchmetrics/functional/regression/tweedie_deviance.py
+++ b/src/torchmetrics/functional/regression/tweedie_deviance.py
@@ -88,7 +88,7 @@ def _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations
     """Compute Deviance Score.
 
     Args:
-        sum_deviance_score: Sum of deviance scores accumalated until now.
+        sum_deviance_score: Sum of deviance scores accumulated until now.
         num_observations: Number of observations encountered until now.
 
     Example:
diff --git a/src/torchmetrics/functional/text/chrf.py b/src/torchmetrics/functional/text/chrf.py
index 81d6e362a49..4ed62c7fc49 100644
--- a/src/torchmetrics/functional/text/chrf.py
+++ b/src/torchmetrics/functional/text/chrf.py
@@ -94,7 +94,7 @@ def _get_characters(sentence: str, whitespace: bool) -> List[str]:
     return list(sentence.strip().replace(" ", ""))
 
 
-def _separate_word_and_punctiation(word: str) -> List[str]:
+def _separate_word_and_punctuation(word: str) -> List[str]:
     """Separates out punctuations from beginning and end of words for chrF.
 
     Adapted from https://github.com/m-popovic/chrF and
@@ -117,7 +117,7 @@ def _separate_word_and_punctiation(word: str) -> List[str]:
     return [word]
 
 
-def _get_words_and_punctiation(sentence: str) -> List[str]:
+def _get_words_and_punctuation(sentence: str) -> List[str]:
     """Separates out punctuations from beginning and end of words for chrF for all words in the sentence.
 
     Args:
@@ -127,7 +127,7 @@ def _get_words_and_punctiation(sentence: str) -> List[str]:
         An aggregated list of separated words and punctuations.
 
""" - return sum((_separate_word_and_punctiation(word) for word in sentence.strip().split()), []) + return sum((_separate_word_and_punctuation(word) for word in sentence.strip().split()), []) def _ngram_counts(char_or_word_list: List[str], n_gram_order: int) -> Dict[int, Dict[Tuple[str, ...], Tensor]]: @@ -180,7 +180,7 @@ def _char_and_word_ngrams_counts( if lowercase: sentence = sentence.lower() char_n_grams_counts = _ngram_counts(_get_characters(sentence, whitespace), n_char_order) - word_n_grams_counts = _ngram_counts(_get_words_and_punctiation(sentence), n_word_order) + word_n_grams_counts = _ngram_counts(_get_words_and_punctuation(sentence), n_word_order) return char_n_grams_counts, word_n_grams_counts def _get_total_ngrams(n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]]) -> Dict[int, Tensor]: diff --git a/src/torchmetrics/functional/text/infolm.py b/src/torchmetrics/functional/text/infolm.py index 691f666c9ab..0d1d36d281c 100644 --- a/src/torchmetrics/functional/text/infolm.py +++ b/src/torchmetrics/functional/text/infolm.py @@ -137,9 +137,9 @@ def __init__( self.alpha = alpha or 0 self.beta = beta or 0 - def __call__(self, preds_distribution: Tensor, target_distribtuion: Tensor) -> Tensor: + def __call__(self, preds_distribution: Tensor, target_distribution: Tensor) -> Tensor: information_measure_function = getattr(self, f"_calculate_{self.information_measure.value}") - return torch.nan_to_num(information_measure_function(preds_distribution, target_distribtuion)) + return torch.nan_to_num(information_measure_function(preds_distribution, target_distribution)) @staticmethod def _calculate_kl_divergence(preds_distribution: Tensor, target_distribution: Tensor) -> Tensor: diff --git a/src/torchmetrics/functional/text/ter.py b/src/torchmetrics/functional/text/ter.py index 256b07e4e86..4020f4e1d63 100644 --- a/src/torchmetrics/functional/text/ter.py +++ b/src/torchmetrics/functional/text/ter.py @@ -134,7 +134,7 @@ def _normalize_general_and_western(sentence: str) -> str: (r">", ">"), # tokenize punctuation (r"([{-~[-` -&(-+:-@/])", r" \1 "), - # handle possesives + # handle possessive (r"'s ", r" 's "), (r"'s$", r" 's"), # tokenize period and comma unless preceded by a digit diff --git a/src/torchmetrics/functional/text/wil.py b/src/torchmetrics/functional/text/wil.py index b4744ddb9e5..bb0d5f7e0d8 100644 --- a/src/torchmetrics/functional/text/wil.py +++ b/src/torchmetrics/functional/text/wil.py @@ -19,11 +19,11 @@ from torchmetrics.functional.text.helper import _edit_distance -def _wil_update( +def _word_info_lost_update( preds: Union[str, List[str]], target: Union[str, List[str]], ) -> Tuple[Tensor, Tensor, Tensor]: - """Update the wil score with the current set of references and predictions. + """Update the WIL score with the current set of references and predictions. Args: preds: Transcription(s) to score as a string or list of strings @@ -54,7 +54,7 @@ def _wil_update( return errors - total, target_total, preds_total -def _wil_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) -> Tensor: +def _word_info_lost_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) -> Tensor: """Compute the Word Information Lost. 
     Args:
@@ -90,5 +90,5 @@ def word_information_lost(preds: Union[str, List[str]], target: Union[str, List[
         tensor(0.6528)
 
     """
-    errors, target_total, preds_total = _wil_update(preds, target)
-    return _wil_compute(errors, target_total, preds_total)
+    errors, target_total, preds_total = _word_info_lost_update(preds, target)
+    return _word_info_lost_compute(errors, target_total, preds_total)
diff --git a/src/torchmetrics/image/fid.py b/src/torchmetrics/image/fid.py
index 775cc448fd0..e8ea1582ff5 100644
--- a/src/torchmetrics/image/fid.py
+++ b/src/torchmetrics/image/fid.py
@@ -313,13 +313,13 @@ def __init__(
             raise ValueError("Argument `normalize` expected to be a bool")
         self.normalize = normalize
 
-        mx_num_feets = (num_features, num_features)
+        mx_num_feats = (num_features, num_features)
         self.add_state("real_features_sum", torch.zeros(num_features).double(), dist_reduce_fx="sum")
-        self.add_state("real_features_cov_sum", torch.zeros(mx_num_feets).double(), dist_reduce_fx="sum")
+        self.add_state("real_features_cov_sum", torch.zeros(mx_num_feats).double(), dist_reduce_fx="sum")
         self.add_state("real_features_num_samples", torch.tensor(0).long(), dist_reduce_fx="sum")
 
         self.add_state("fake_features_sum", torch.zeros(num_features).double(), dist_reduce_fx="sum")
-        self.add_state("fake_features_cov_sum", torch.zeros(mx_num_feets).double(), dist_reduce_fx="sum")
+        self.add_state("fake_features_cov_sum", torch.zeros(mx_num_feats).double(), dist_reduce_fx="sum")
         self.add_state("fake_features_num_samples", torch.tensor(0).long(), dist_reduce_fx="sum")
 
     def update(self, imgs: Tensor, real: bool) -> None:
diff --git a/src/torchmetrics/text/wil.py b/src/torchmetrics/text/wil.py
index b94dfd90982..3d831b5c562 100644
--- a/src/torchmetrics/text/wil.py
+++ b/src/torchmetrics/text/wil.py
@@ -15,7 +15,7 @@
 
 from torch import Tensor, tensor
 
-from torchmetrics.functional.text.wil import _wil_compute, _wil_update
+from torchmetrics.functional.text.wil import _word_info_lost_compute, _word_info_lost_update
 from torchmetrics.metric import Metric
 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
@@ -82,14 +82,14 @@ def __init__(
 
     def update(self, preds: Union[str, List[str]], target: Union[str, List[str]]) -> None:
         """Update state with predictions and targets."""
-        errors, target_total, preds_total = _wil_update(preds, target)
+        errors, target_total, preds_total = _word_info_lost_update(preds, target)
         self.errors += errors
         self.target_total += target_total
         self.preds_total += preds_total
 
     def compute(self) -> Tensor:
         """Calculate the Word Information Lost."""
-        return _wil_compute(self.errors, self.target_total, self.preds_total)
+        return _word_info_lost_compute(self.errors, self.target_total, self.preds_total)
 
     def plot(
         self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
diff --git a/tests/unittests/bases/test_collections.py b/tests/unittests/bases/test_collections.py
index 98ae0c97f9c..6f3e64d1b6c 100644
--- a/tests/unittests/bases/test_collections.py
+++ b/tests/unittests/bases/test_collections.py
@@ -557,7 +557,7 @@ def test_compute_on_different_dtype():
 
 
 def test_error_on_wrong_specified_compute_groups():
-    """Test that error is raised if user mis-specify the compute groups."""
+    """Test that error is raised if user mis-specifies the compute groups."""
     with pytest.raises(ValueError, match="Input MulticlassAccuracy in `compute_groups`.*"):
         MetricCollection(
             MulticlassConfusionMatrix(3),
diff --git a/tests/unittests/pairwise/test_pairwise_distance.py b/tests/unittests/pairwise/test_pairwise_distance.py
index 8287021a13f..d80377df432 100644
--- a/tests/unittests/pairwise/test_pairwise_distance.py
+++ b/tests/unittests/pairwise/test_pairwise_distance.py
@@ -153,7 +153,7 @@ def test_error_on_wrong_shapes(metric):
         (partial(pairwise_minkowski_distance, exponent=3), partial(pairwise_distances, metric="minkowski", p=3)),
     ],
 )
-def test_precison_case(metric_functional, sk_fn):
+def test_precision_case(metric_functional, sk_fn):
     """Test that metrics are robust towards cases where high precision is needed."""
    x = torch.tensor([[772.0, 112.0], [772.20001, 112.0]])
    res1 = metric_functional(x, zero_diagonal=False)
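
The `_wil_*` renames above touch only private helpers; the public Word Information Lost API keeps its names and behavior. For reference, a minimal usage sketch of that public API. The input strings are assumed from the metric's docstring example (only the expected output, `tensor(0.6528)`, is visible in the diff above), so treat the exact value as indicative:

    from torchmetrics.functional.text import word_information_lost
    from torchmetrics.text import WordInfoLost

    preds = ["this is the prediction", "there is an other sample"]
    target = ["this is the reference", "there is another one"]

    # Functional form: internally runs the renamed helpers
    # _word_info_lost_update and _word_info_lost_compute.
    print(word_information_lost(preds, target))  # tensor(0.6528)

    # Module form: update() accumulates the errors and word totals,
    # compute() combines them into the final score.
    wil_metric = WordInfoLost()
    wil_metric.update(preds, target)
    print(wil_metric.compute())  # tensor(0.6528)

Because the module form keeps running totals (`errors`, `target_total`, `preds_total`, as shown in the text/wil.py hunk), it can be updated batch by batch and still reproduce the single-call functional result.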