hunting some more typos (#2112)
* calcualte

* insesitivity

* AVAILABEL

* specifity

* indeces

* Avarage

* _irs_mis_sz

* symetrical

* _irs_mis_sz_fn

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Borda and pre-commit-ci[bot] authored Sep 26, 2023
1 parent 64f17f0 commit 9aecaf4
Showing 20 changed files with 51 additions and 51 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -430,7 +430,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fixed mAP calculation for areas with 0 predictions ([#1080](https://github.com/Lightning-AI/metrics/pull/1080))
- Fixed bug where avg precision state and auroc state was not merge when using MetricCollections ([#1086](https://github.com/Lightning-AI/metrics/pull/1086))
- Skip box conversion if no boxes are present in `MeanAveragePrecision` ([#1097](https://github.com/Lightning-AI/metrics/pull/1097))
- - Fixed inconsistency in docs and code when setting `average="none"` in `AvaragePrecision` metric ([#1116](https://github.com/Lightning-AI/metrics/pull/1116))
+ - Fixed inconsistency in docs and code when setting `average="none"` in `AveragePrecision` metric ([#1116](https://github.com/Lightning-AI/metrics/pull/1116))


## [0.9.1] - 2022-06-08
6 changes: 3 additions & 3 deletions src/torchmetrics/audio/__init__.py
@@ -23,10 +23,10 @@
SignalNoiseRatio,
)
from torchmetrics.utilities.imports import (
- _GAMMATONE_AVAILABEL,
+ _GAMMATONE_AVAILABLE,
_PESQ_AVAILABLE,
_PYSTOI_AVAILABLE,
- _TORCHAUDIO_AVAILABEL,
+ _TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)

@@ -50,7 +50,7 @@

__all__.append("ShortTimeObjectiveIntelligibility")

- if _GAMMATONE_AVAILABEL and _TORCHAUDIO_AVAILABEL and _TORCHAUDIO_GREATER_EQUAL_0_10:
+ if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio # noqa: F401

__all__.append("SpeechReverberationModulationEnergyRatio")
8 changes: 4 additions & 4 deletions src/torchmetrics/audio/srmr.py
@@ -21,14 +21,14 @@
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import (
- _GAMMATONE_AVAILABEL,
+ _GAMMATONE_AVAILABLE,
_MATPLOTLIB_AVAILABLE,
- _TORCHAUDIO_AVAILABEL,
+ _TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

- if not all([_GAMMATONE_AVAILABEL, _TORCHAUDIO_AVAILABEL, _TORCHAUDIO_GREATER_EQUAL_0_10]):
+ if not all([_GAMMATONE_AVAILABLE, _TORCHAUDIO_AVAILABLE, _TORCHAUDIO_GREATER_EQUAL_0_10]):
__doctest_skip__ = ["SpeechReverberationModulationEnergyRatio", "SpeechReverberationModulationEnergyRatio.plot"]
elif not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SpeechReverberationModulationEnergyRatio.plot"]
@@ -106,7 +106,7 @@ def __init__(
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
- if not _TORCHAUDIO_AVAILABEL or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABEL:
+ if not _TORCHAUDIO_AVAILABLE or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABLE:
raise ModuleNotFoundError(
"speech_reverberation_modulation_energy_ratio requires you to have `gammatone` and"
" `torchaudio>=0.10` installed. Either install as ``pip install torchmetrics[audio]`` or "
6 changes: 3 additions & 3 deletions src/torchmetrics/functional/audio/__init__.py
@@ -23,10 +23,10 @@
signal_noise_ratio,
)
from torchmetrics.utilities.imports import (
- _GAMMATONE_AVAILABEL,
+ _GAMMATONE_AVAILABLE,
_PESQ_AVAILABLE,
_PYSTOI_AVAILABLE,
- _TORCHAUDIO_AVAILABEL,
+ _TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)

@@ -51,7 +51,7 @@

__all__.append("short_time_objective_intelligibility")

- if _GAMMATONE_AVAILABEL and _TORCHAUDIO_AVAILABEL and _TORCHAUDIO_GREATER_EQUAL_0_10:
+ if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio # noqa: F401

__all__.append("speech_reverberation_modulation_energy_ratio")
10 changes: 5 additions & 5 deletions src/torchmetrics/functional/audio/srmr.py
@@ -25,18 +25,18 @@

from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import (
- _GAMMATONE_AVAILABEL,
- _TORCHAUDIO_AVAILABEL,
+ _GAMMATONE_AVAILABLE,
+ _TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)

- if _TORCHAUDIO_AVAILABEL and _TORCHAUDIO_GREATER_EQUAL_0_10:
+ if _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
from torchaudio.functional.filtering import lfilter
else:
lfilter = None
__doctest_skip__ = ["speech_reverberation_modulation_energy_ratio"]

- if _GAMMATONE_AVAILABEL:
+ if _GAMMATONE_AVAILABLE:
from gammatone.fftweight import fft_gtgram
from gammatone.filters import centre_freqs, make_erb_filters
else:
@@ -233,7 +233,7 @@ def speech_reverberation_modulation_energy_ratio(
tensor([0.3354], dtype=torch.float64)
"""
- if not _TORCHAUDIO_AVAILABEL or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABEL:
+ if not _TORCHAUDIO_AVAILABLE or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABLE:
raise ModuleNotFoundError(
"speech_reverberation_modulation_energy_ratio requires you to have `gammatone` and"
" `torchaudio>=0.10` installed. Either install as ``pip install torchmetrics[audio]`` or "
6 changes: 3 additions & 3 deletions src/torchmetrics/functional/classification/dice.py
@@ -50,9 +50,9 @@ def _dice_compute(

if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
# a class is not present if there exists no TPs, no FPs, and no FNs
- meaningless_indeces = torch.nonzero((tp | fn | fp) == 0).cpu()
- numerator[meaningless_indeces, ...] = -1
- denominator[meaningless_indeces, ...] = -1
+ meaningless_indices = torch.nonzero((tp | fn | fp) == 0).cpu()
+ numerator[meaningless_indices, ...] = -1
+ denominator[meaningless_indices, ...] = -1

return _reduce_stat_scores(
numerator=numerator,
@@ -422,8 +422,8 @@ def specicity_at_sensitivity(
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_specificity_at_sensitivity`,
- :func:`~torchmetrics.functional.classification.multiclass_specicity_at_sensitivity` and
- :func:`~torchmetrics.functional.classification.multilabel_specifity_at_sensitvity` for the specific details of
+ :func:`~torchmetrics.functional.classification.multiclass_specificity_at_sensitivity` and
+ :func:`~torchmetrics.functional.classification.multilabel_specificity_at_sensitivity` for the specific details of
each argument influence and examples.
"""
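For orientation, a minimal usage sketch of the binary variant referenced in this docstring (input values are illustrative, not taken from the repository):

```python
import torch
from torchmetrics.functional.classification import binary_specificity_at_sensitivity

preds = torch.tensor([0.1, 0.4, 0.35, 0.8])
target = torch.tensor([0, 0, 1, 1])
# best specificity achievable while sensitivity stays >= 0.5,
# together with the decision threshold that attains it
spec, threshold = binary_specificity_at_sensitivity(preds, target, min_sensitivity=0.5)
```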
4 changes: 2 additions & 2 deletions src/torchmetrics/functional/clustering/adjusted_rand_score.py
@@ -15,8 +15,8 @@
from torch import Tensor

from torchmetrics.functional.clustering.utils import (
- calcualte_pair_cluster_confusion_matrix,
calculate_contingency_matrix,
+ calculate_pair_cluster_confusion_matrix,
check_cluster_labels,
)

@@ -46,7 +46,7 @@ def _adjusted_rand_score_compute(contingency: Tensor) -> Tensor:
rand_score: rand score
"""
- (tn, fp), (fn, tp) = calcualte_pair_cluster_confusion_matrix(contingency=contingency)
+ (tn, fp), (fn, tp) = calculate_pair_cluster_confusion_matrix(contingency=contingency)
if fn == 0 and fp == 0:
return torch.ones_like(tn, dtype=torch.float32)
return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
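A short sketch tying `_adjusted_rand_score_compute` to the pair matrix it consumes, using the doctest values from `clustering/utils.py` below (assumes the helper name as renamed in this commit):

```python
import torch
from torchmetrics.functional.clustering.utils import calculate_pair_cluster_confusion_matrix

preds = torch.tensor([0, 0, 1, 1])
target = torch.tensor([1, 1, 0, 0])
# entries count ordered sample pairs, so they sum to n * (n - 1) = 12 here
(tn, fp), (fn, tp) = calculate_pair_cluster_confusion_matrix(preds, target)
# tn=8, fp=0, fn=0, tp=4: the `fn == 0 and fp == 0` early return above fires,
# yielding a perfect score of 1.0 for clusterings that differ only by label permutation
```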
4 changes: 2 additions & 2 deletions src/torchmetrics/functional/clustering/rand_score.py
@@ -15,8 +15,8 @@
from torch import Tensor

from torchmetrics.functional.clustering.utils import (
- calcualte_pair_cluster_confusion_matrix,
calculate_contingency_matrix,
+ calculate_pair_cluster_confusion_matrix,
check_cluster_labels,
)

@@ -46,7 +46,7 @@ def _rand_score_compute(contingency: Tensor) -> Tensor:
rand_score: rand score
"""
- pair_matrix = calcualte_pair_cluster_confusion_matrix(contingency=contingency)
+ pair_matrix = calculate_pair_cluster_confusion_matrix(contingency=contingency)

numerator = pair_matrix.diagonal().sum()
denominator = pair_matrix.sum()
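The numerator (diagonal sum) and denominator (total sum) above assemble the plain Rand index from the pair matrix:

```latex
\mathrm{RI} = \frac{tn + tp}{tn + fp + fn + tp}
```

For the second doctest matrix in `clustering/utils.py` below, `[[8, 2], [0, 2]]`, this gives (8 + 2) / 12 ≈ 0.83.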
8 changes: 4 additions & 4 deletions src/torchmetrics/functional/clustering/utils.py
@@ -214,7 +214,7 @@ def _validate_intrinsic_labels_to_samples(num_labels: int, num_samples: int) ->
)


- def calcualte_pair_cluster_confusion_matrix(
+ def calculate_pair_cluster_confusion_matrix(
preds: Optional[Tensor] = None,
target: Optional[Tensor] = None,
contingency: Optional[Tensor] = None,
@@ -247,15 +247,15 @@ def calcualte_pair_cluster_confusion_matrix(
Example:
>>> import torch
- >>> from torchmetrics.functional.clustering.utils import calcualte_pair_cluster_confusion_matrix
+ >>> from torchmetrics.functional.clustering.utils import calculate_pair_cluster_confusion_matrix
>>> preds = torch.tensor([0, 0, 1, 1])
>>> target = torch.tensor([1, 1, 0, 0])
- >>> calcualte_pair_cluster_confusion_matrix(preds, target)
+ >>> calculate_pair_cluster_confusion_matrix(preds, target)
tensor([[8, 0],
[0, 4]])
>>> preds = torch.tensor([0, 0, 1, 2])
>>> target = torch.tensor([0, 0, 1, 1])
- >>> calcualte_pair_cluster_confusion_matrix(preds, target)
+ >>> calculate_pair_cluster_confusion_matrix(preds, target)
tensor([[8, 2],
[0, 2]])
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/regression/kl_divergence.py
@@ -89,7 +89,7 @@ def kl_divergence(
Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
- is a non-symetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+ is a non-symmetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
Args:
p: data distribution with shape ``[N, d]``
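For reference, the discrete definition behind the non-symmetry this docstring points out:

```latex
D_{KL}(P \,\|\, Q) = \sum_{x} P(x) \, \log \frac{P(x)}{Q(x)}
```

Swapping P and Q reweights the log-ratios by the other distribution, so the two directions generally disagree.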
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/text/chrf.py
@@ -559,7 +559,7 @@ def chrf_score(
metric is equivalent to the original chrF.
beta:
A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
- lowercase: An indication whether to enable case-insesitivity.
+ lowercase: An indication whether to enable case-insensitivity.
whitespace: An indication whether to keep whitespaces during character n-gram extraction.
return_sentence_level_score: An indication whether a sentence-level chrF/chrF++ score to be returned.
4 changes: 2 additions & 2 deletions src/torchmetrics/functional/text/ter.py
@@ -80,7 +80,7 @@ def __init__(
Args:
normalize: An indication whether a general tokenization to be applied.
no_punctuation: An indication whteher a punctuation to be removed from the sentences.
- lowercase: An indication whether to enable case-insesitivity.
+ lowercase: An indication whether to enable case-insensitivity.
asian_support: An indication whether asian characters to be processed.
"""
@@ -551,7 +551,7 @@ def translation_edit_rate(
target: An iterable of iterables of reference corpus.
normalize: An indication whether a general tokenization to be applied.
no_punctuation: An indication whteher a punctuation to be removed from the sentences.
- lowercase: An indication whether to enable case-insesitivity.
+ lowercase: An indication whether to enable case-insensitivity.
asian_support: An indication whether asian characters to be processed.
return_sentence_level_score: An indication whether a sentence-level TER to be returned.
2 changes: 1 addition & 1 deletion src/torchmetrics/regression/kl_divergence.py
@@ -35,7 +35,7 @@ class KLDivergence(Metric):
Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
- is a non-symetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+ is a non-symmetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
As input to ``forward`` and ``update`` the metric accepts the following input:
2 changes: 1 addition & 1 deletion src/torchmetrics/text/chrf.py
@@ -71,7 +71,7 @@ class CHRFScore(Metric):
n_word_order: A word n-gram order. If ``n_word_order=2``, the metric refers to the official chrF++.
If ``n_word_order=0``, the metric is equivalent to the original ChrF.
beta: parameter determining an importance of recall w.r.t. precision. If ``beta=1``, their importance is equal.
- lowercase: An indication whether to enable case-insesitivity.
+ lowercase: An indication whether to enable case-insensitivity.
whitespace: An indication whether keep whitespaces during n-gram extraction.
return_sentence_level_score: An indication whether a sentence-level chrF/chrF++ score to be returned.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
2 changes: 1 addition & 1 deletion src/torchmetrics/text/ter.py
@@ -45,7 +45,7 @@ class TranslationEditRate(Metric):
Args:
normalize: An indication whether a general tokenization to be applied.
no_punctuation: An indication whteher a punctuation to be removed from the sentences.
- lowercase: An indication whether to enable case-insesitivity.
+ lowercase: An indication whether to enable case-insensitivity.
asian_support: An indication whether asian characters to be processed.
return_sentence_level_score: An indication whether a sentence-level TER to be returned.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
4 changes: 2 additions & 2 deletions src/torchmetrics/utilities/imports.py
@@ -45,8 +45,8 @@
_TRANSFORMERS_GREATER_EQUAL_4_4: Optional[bool] = compare_version("transformers", operator.ge, "4.4.0")
_TRANSFORMERS_GREATER_EQUAL_4_10: Optional[bool] = compare_version("transformers", operator.ge, "4.10.0")
_PESQ_AVAILABLE: bool = package_available("pesq")
- _GAMMATONE_AVAILABEL: bool = package_available("gammatone")
- _TORCHAUDIO_AVAILABEL: bool = package_available("torchaudio")
+ _GAMMATONE_AVAILABLE: bool = package_available("gammatone")
+ _TORCHAUDIO_AVAILABLE: bool = package_available("torchaudio")
_TORCHAUDIO_GREATER_EQUAL_0_10: Optional[bool] = compare_version("torchaudio", operator.ge, "0.10.0")
_SACREBLEU_AVAILABLE: bool = package_available("sacrebleu")
_REGEX_AVAILABLE: bool = package_available("regex")
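These flags back the optional-import guard used throughout the audio modules in this commit; condensed, the pattern is:

```python
from torchmetrics.utilities.imports import _TORCHAUDIO_AVAILABLE, _TORCHAUDIO_GREATER_EQUAL_0_10

if _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
    from torchaudio.functional.filtering import lfilter
else:
    lfilter = None  # callers raise ModuleNotFoundError with an install hint instead
```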
2 changes: 1 addition & 1 deletion tests/unittests/audio/test_sdr.py
@@ -75,7 +75,7 @@ def _average_metric(preds: Tensor, target: Tensor, metric_func: Callable) -> Ten
],
)
class TestSDR(MetricTester):
"""Test class for `SingalDistortionRatio` metric."""
"""Test class for `SignalDistortionRatio` metric."""

atol = 1e-2

4 changes: 2 additions & 2 deletions tests/unittests/clustering/test_utils.py
@@ -21,10 +21,10 @@
from sklearn.metrics.cluster import pair_confusion_matrix as sklearn_pair_confusion_matrix
from sklearn.metrics.cluster._supervised import _generalized_average as sklearn_generalized_average
from torchmetrics.functional.clustering.utils import (
- calcualte_pair_cluster_confusion_matrix,
calculate_contingency_matrix,
calculate_entropy,
calculate_generalized_mean,
+ calculate_pair_cluster_confusion_matrix,
)

from unittests import BATCH_SIZE, NUM_BATCHES
@@ -113,6 +113,6 @@ class TestPairClusterConfusionMatrix:

def test_pair_cluster_confusion_matrix(self, preds, target):
"""Check that pair cluster confusion matrix is calculated correctly."""
- tm_res = calcualte_pair_cluster_confusion_matrix(preds, target)
+ tm_res = calculate_pair_cluster_confusion_matrix(preds, target)
sklearn_res = sklearn_pair_confusion_matrix(preds, target)
assert np.allclose(tm_res, sklearn_res, atol=self.atol)
20 changes: 10 additions & 10 deletions tests/unittests/retrieval/helpers.py
@@ -29,8 +29,8 @@
from unittests.retrieval.inputs import _input_retrieval_scores_float_target as _irs_float_tgt
from unittests.retrieval.inputs import _input_retrieval_scores_for_adaptive_k as _irs_adpt_k
from unittests.retrieval.inputs import _input_retrieval_scores_int_target as _irs_int_tgt
- from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes as _irs_mis_sz
- from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes_func as _irs_mis_sz_fn
+ from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes as _irs_bad_sz
+ from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes_func as _irs_bad_sz_fn
from unittests.retrieval.inputs import _input_retrieval_scores_no_target as _irs_no_tgt
from unittests.retrieval.inputs import _input_retrieval_scores_with_ignore_index as _irs_ii
from unittests.retrieval.inputs import _input_retrieval_scores_wrong_targets as _irs_bad_tgt
@@ -136,7 +136,7 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
"argnames": "preds,target,message,metric_args",
"argvalues": [
# check input shapes are consistent (func)
- (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
+ (_irs_bad_sz_fn.preds, _irs_bad_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
# check input tensors are not empty
(_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
# check on input dtypes
@@ -150,7 +150,7 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
"argnames": "preds,target,message,metric_args",
"argvalues": [
# check input shapes are consistent (func)
- (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
+ (_irs_bad_sz_fn.preds, _irs_bad_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
# check input tensors are not empty
(_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
# check on input dtypes
@@ -224,9 +224,9 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
),
# check input shapes are consistent
(
- _irs_mis_sz.indexes,
- _irs_mis_sz.preds,
- _irs_mis_sz.target,
+ _irs_bad_sz.indexes,
+ _irs_bad_sz.preds,
+ _irs_bad_sz.target,
"`indexes`, `preds` and `target` must be of the same shape",
{"empty_target_action": "skip"},
),
@@ -278,9 +278,9 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
),
# check input shapes are consistent
(
- _irs_mis_sz.indexes,
- _irs_mis_sz.preds,
- _irs_mis_sz.target,
+ _irs_bad_sz.indexes,
+ _irs_bad_sz.preds,
+ _irs_bad_sz.target,
"`indexes`, `preds` and `target` must be of the same shape",
{"empty_target_action": "skip"},
),
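The renamed `_irs_bad_sz*` fixtures exercise the shape check quoted in the expected messages above; a hypothetical minimal trigger (the metric choice is illustrative — any retrieval metric performs this validation):

```python
import torch
from torchmetrics.functional import retrieval_average_precision

preds = torch.rand(10)
target = torch.randint(2, (5,))  # deliberately a different shape than preds
# raises ValueError: `preds` and `target` must be of the same shape
retrieval_average_precision(preds, target)
```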
