diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8336adff876..7eee849cf93 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -430,7 +430,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Fixed mAP calculation for areas with 0 predictions ([#1080](https://github.com/Lightning-AI/metrics/pull/1080))
 - Fixed bug where avg precision state and auroc state was not merge when using MetricCollections ([#1086](https://github.com/Lightning-AI/metrics/pull/1086))
 - Skip box conversion if no boxes are present in `MeanAveragePrecision` ([#1097](https://github.com/Lightning-AI/metrics/pull/1097))
-- Fixed inconsistency in docs and code when setting `average="none"` in `AvaragePrecision` metric ([#1116](https://github.com/Lightning-AI/metrics/pull/1116))
+- Fixed inconsistency in docs and code when setting `average="none"` in `AveragePrecision` metric ([#1116](https://github.com/Lightning-AI/metrics/pull/1116))


 ## [0.9.1] - 2022-06-08
diff --git a/src/torchmetrics/audio/__init__.py b/src/torchmetrics/audio/__init__.py
index 68d6b56ec27..1df6c22645e 100644
--- a/src/torchmetrics/audio/__init__.py
+++ b/src/torchmetrics/audio/__init__.py
@@ -23,10 +23,10 @@
     SignalNoiseRatio,
 )
 from torchmetrics.utilities.imports import (
-    _GAMMATONE_AVAILABEL,
+    _GAMMATONE_AVAILABLE,
     _PESQ_AVAILABLE,
     _PYSTOI_AVAILABLE,
-    _TORCHAUDIO_AVAILABEL,
+    _TORCHAUDIO_AVAILABLE,
     _TORCHAUDIO_GREATER_EQUAL_0_10,
 )
@@ -50,7 +50,7 @@

 __all__.append("ShortTimeObjectiveIntelligibility")

-if _GAMMATONE_AVAILABEL and _TORCHAUDIO_AVAILABEL and _TORCHAUDIO_GREATER_EQUAL_0_10:
+if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
     from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio  # noqa: F401

     __all__.append("SpeechReverberationModulationEnergyRatio")
diff --git a/src/torchmetrics/audio/srmr.py b/src/torchmetrics/audio/srmr.py
index b0a439d6c08..620e8743fd2 100644
--- a/src/torchmetrics/audio/srmr.py
+++ b/src/torchmetrics/audio/srmr.py
@@ -21,14 +21,14 @@
 )
 from torchmetrics.metric import Metric
 from torchmetrics.utilities.imports import (
-    _GAMMATONE_AVAILABEL,
+    _GAMMATONE_AVAILABLE,
     _MATPLOTLIB_AVAILABLE,
-    _TORCHAUDIO_AVAILABEL,
+    _TORCHAUDIO_AVAILABLE,
     _TORCHAUDIO_GREATER_EQUAL_0_10,
 )
 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

-if not all([_GAMMATONE_AVAILABEL, _TORCHAUDIO_AVAILABEL, _TORCHAUDIO_GREATER_EQUAL_0_10]):
+if not all([_GAMMATONE_AVAILABLE, _TORCHAUDIO_AVAILABLE, _TORCHAUDIO_GREATER_EQUAL_0_10]):
     __doctest_skip__ = ["SpeechReverberationModulationEnergyRatio", "SpeechReverberationModulationEnergyRatio.plot"]
 elif not _MATPLOTLIB_AVAILABLE:
     __doctest_skip__ = ["SpeechReverberationModulationEnergyRatio.plot"]
@@ -106,7 +106,7 @@ def __init__(
         **kwargs: Any,
     ) -> None:
         super().__init__(**kwargs)
-        if not _TORCHAUDIO_AVAILABEL or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABEL:
+        if not _TORCHAUDIO_AVAILABLE or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABLE:
             raise ModuleNotFoundError(
                 "speech_reverberation_modulation_energy_ratio requires you to have `gammatone` and"
                 " `torchaudio>=0.10` installed. Either install as ``pip install torchmetrics[audio]`` or "
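For context on the class whose import guard and availability flags are renamed above, here is a minimal usage sketch, assuming `gammatone` and `torchaudio>=0.10` are installed (for example via ``pip install torchmetrics[audio]``). The `fs` argument name and the random input are illustrative assumptions, not taken from this diff.

```python
# Hedged usage sketch of SpeechReverberationModulationEnergyRatio; without the optional
# audio backends (gammatone, torchaudio>=0.10) the constructor raises the
# ModuleNotFoundError shown in the hunk above.
import torch
from torchmetrics.audio import SpeechReverberationModulationEnergyRatio

waveform = torch.randn(8000)  # illustrative mono signal
srmr = SpeechReverberationModulationEnergyRatio(fs=8000)  # fs = sample rate (assumed argument name)
print(srmr(waveform))  # non-intrusive score; no reference signal needed
```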
diff --git a/src/torchmetrics/functional/audio/__init__.py b/src/torchmetrics/functional/audio/__init__.py
index a0982d3443d..b6469c7aace 100644
--- a/src/torchmetrics/functional/audio/__init__.py
+++ b/src/torchmetrics/functional/audio/__init__.py
@@ -23,10 +23,10 @@
     signal_noise_ratio,
 )
 from torchmetrics.utilities.imports import (
-    _GAMMATONE_AVAILABEL,
+    _GAMMATONE_AVAILABLE,
     _PESQ_AVAILABLE,
     _PYSTOI_AVAILABLE,
-    _TORCHAUDIO_AVAILABEL,
+    _TORCHAUDIO_AVAILABLE,
     _TORCHAUDIO_GREATER_EQUAL_0_10,
 )
@@ -51,7 +51,7 @@

 __all__.append("short_time_objective_intelligibility")

-if _GAMMATONE_AVAILABEL and _TORCHAUDIO_AVAILABEL and _TORCHAUDIO_GREATER_EQUAL_0_10:
+if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
     from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio  # noqa: F401

     __all__.append("speech_reverberation_modulation_energy_ratio")
diff --git a/src/torchmetrics/functional/audio/srmr.py b/src/torchmetrics/functional/audio/srmr.py
index 485c263e1dc..c03349e742d 100644
--- a/src/torchmetrics/functional/audio/srmr.py
+++ b/src/torchmetrics/functional/audio/srmr.py
@@ -25,18 +25,18 @@
 from torchmetrics.utilities import rank_zero_warn
 from torchmetrics.utilities.imports import (
-    _GAMMATONE_AVAILABEL,
-    _TORCHAUDIO_AVAILABEL,
+    _GAMMATONE_AVAILABLE,
+    _TORCHAUDIO_AVAILABLE,
     _TORCHAUDIO_GREATER_EQUAL_0_10,
 )

-if _TORCHAUDIO_AVAILABEL and _TORCHAUDIO_GREATER_EQUAL_0_10:
+if _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
     from torchaudio.functional.filtering import lfilter
 else:
     lfilter = None
     __doctest_skip__ = ["speech_reverberation_modulation_energy_ratio"]

-if _GAMMATONE_AVAILABEL:
+if _GAMMATONE_AVAILABLE:
     from gammatone.fftweight import fft_gtgram
     from gammatone.filters import centre_freqs, make_erb_filters
 else:
@@ -233,7 +233,7 @@ def speech_reverberation_modulation_energy_ratio(
         tensor([0.3354], dtype=torch.float64)

     """
-    if not _TORCHAUDIO_AVAILABEL or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABEL:
+    if not _TORCHAUDIO_AVAILABLE or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABLE:
         raise ModuleNotFoundError(
             "speech_reverberation_modulation_energy_ratio requires you to have `gammatone` and"
             " `torchaudio>=0.10` installed. Either install as ``pip install torchmetrics[audio]`` or "
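The functional counterpart touched above exposes the same computation. A hedged sketch follows; only the output value appears in the hunk above, so the seeding and input construction are assumptions about how that doctest is set up.

```python
# Hedged sketch of the functional form of SRMR; mirrors the doctest referenced above.
import torch
from torchmetrics.functional.audio import speech_reverberation_modulation_energy_ratio

g = torch.manual_seed(1)          # assumed doctest setup
preds = torch.randn(8000)         # assumed doctest input
score = speech_reverberation_modulation_energy_ratio(preds, 8000)  # second argument: sample rate
print(score)  # the doctest above reports tensor([0.3354], dtype=torch.float64)
```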
diff --git a/src/torchmetrics/functional/classification/dice.py b/src/torchmetrics/functional/classification/dice.py
index 13d70fe0181..49d66ea9361 100644
--- a/src/torchmetrics/functional/classification/dice.py
+++ b/src/torchmetrics/functional/classification/dice.py
@@ -50,9 +50,9 @@ def _dice_compute(

     if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
         # a class is not present if there exists no TPs, no FPs, and no FNs
-        meaningless_indeces = torch.nonzero((tp | fn | fp) == 0).cpu()
-        numerator[meaningless_indeces, ...] = -1
-        denominator[meaningless_indeces, ...] = -1
+        meaningless_indices = torch.nonzero((tp | fn | fp) == 0).cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1

     return _reduce_stat_scores(
         numerator=numerator,
diff --git a/src/torchmetrics/functional/classification/specificity_sensitivity.py b/src/torchmetrics/functional/classification/specificity_sensitivity.py
index 6851917e1ab..a44948f8570 100644
--- a/src/torchmetrics/functional/classification/specificity_sensitivity.py
+++ b/src/torchmetrics/functional/classification/specificity_sensitivity.py
@@ -422,8 +422,8 @@ def specicity_at_sensitivity(
     This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
     ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
     :func:`~torchmetrics.functional.classification.binary_specificity_at_sensitivity`,
-    :func:`~torchmetrics.functional.classification.multiclass_specicity_at_sensitivity` and
-    :func:`~torchmetrics.functional.classification.multilabel_specifity_at_sensitvity` for the specific details of
+    :func:`~torchmetrics.functional.classification.multiclass_specificity_at_sensitivity` and
+    :func:`~torchmetrics.functional.classification.multilabel_specificity_at_sensitivity` for the specific details of
     each argument influence and examples.

     """
diff --git a/src/torchmetrics/functional/clustering/adjusted_rand_score.py b/src/torchmetrics/functional/clustering/adjusted_rand_score.py
index 59d742862f8..5733d8b49d7 100644
--- a/src/torchmetrics/functional/clustering/adjusted_rand_score.py
+++ b/src/torchmetrics/functional/clustering/adjusted_rand_score.py
@@ -15,8 +15,8 @@
 from torch import Tensor

 from torchmetrics.functional.clustering.utils import (
-    calcualte_pair_cluster_confusion_matrix,
     calculate_contingency_matrix,
+    calculate_pair_cluster_confusion_matrix,
     check_cluster_labels,
 )
@@ -46,7 +46,7 @@ def _adjusted_rand_score_compute(contingency: Tensor) -> Tensor:
         rand_score: rand score

     """
-    (tn, fp), (fn, tp) = calcualte_pair_cluster_confusion_matrix(contingency=contingency)
+    (tn, fp), (fn, tp) = calculate_pair_cluster_confusion_matrix(contingency=contingency)
     if fn == 0 and fp == 0:
         return torch.ones_like(tn, dtype=torch.float32)
     return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
diff --git a/src/torchmetrics/functional/clustering/rand_score.py b/src/torchmetrics/functional/clustering/rand_score.py
index 2e6848f46d5..51b719641fd 100644
--- a/src/torchmetrics/functional/clustering/rand_score.py
+++ b/src/torchmetrics/functional/clustering/rand_score.py
@@ -15,8 +15,8 @@
 from torch import Tensor

 from torchmetrics.functional.clustering.utils import (
-    calcualte_pair_cluster_confusion_matrix,
     calculate_contingency_matrix,
+    calculate_pair_cluster_confusion_matrix,
     check_cluster_labels,
 )
@@ -46,7 +46,7 @@ def _rand_score_compute(contingency: Tensor) -> Tensor:
         rand_score: rand score

     """
-    pair_matrix = calcualte_pair_cluster_confusion_matrix(contingency=contingency)
+    pair_matrix = calculate_pair_cluster_confusion_matrix(contingency=contingency)
     numerator = pair_matrix.diagonal().sum()
     denominator = pair_matrix.sum()
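The rand score computed above is the diagonal of the pair confusion matrix divided by its total. A worked sketch, reusing the doctest values that appear later in this diff and assuming `rand_score` is exported from the clustering subpackage touched here:

```python
# Worked example of _rand_score_compute's logic: agreements (diagonal) over all pairs.
import torch
from torchmetrics.functional.clustering import rand_score
from torchmetrics.functional.clustering.utils import calculate_pair_cluster_confusion_matrix

preds = torch.tensor([0, 0, 1, 1])
target = torch.tensor([1, 1, 0, 0])

pair_matrix = calculate_pair_cluster_confusion_matrix(preds, target)  # tensor([[8, 0], [0, 4]])
print(pair_matrix.diagonal().sum() / pair_matrix.sum())  # (8 + 4) / 12 = 1.0
print(rand_score(preds, target))  # same value: the partitions agree up to relabeling
```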
diff --git a/src/torchmetrics/functional/clustering/utils.py b/src/torchmetrics/functional/clustering/utils.py
index 8588c305742..4edc503da30 100644
--- a/src/torchmetrics/functional/clustering/utils.py
+++ b/src/torchmetrics/functional/clustering/utils.py
@@ -214,7 +214,7 @@ def _validate_intrinsic_labels_to_samples(num_labels: int, num_samples: int) ->
     )


-def calcualte_pair_cluster_confusion_matrix(
+def calculate_pair_cluster_confusion_matrix(
     preds: Optional[Tensor] = None,
     target: Optional[Tensor] = None,
     contingency: Optional[Tensor] = None,
@@ -247,15 +247,15 @@ def calcualte_pair_cluster_confusion_matrix(

     Example:
         >>> import torch
-        >>> from torchmetrics.functional.clustering.utils import calcualte_pair_cluster_confusion_matrix
+        >>> from torchmetrics.functional.clustering.utils import calculate_pair_cluster_confusion_matrix
         >>> preds = torch.tensor([0, 0, 1, 1])
         >>> target = torch.tensor([1, 1, 0, 0])
-        >>> calcualte_pair_cluster_confusion_matrix(preds, target)
+        >>> calculate_pair_cluster_confusion_matrix(preds, target)
         tensor([[8, 0],
                 [0, 4]])
         >>> preds = torch.tensor([0, 0, 1, 2])
         >>> target = torch.tensor([0, 0, 1, 1])
-        >>> calcualte_pair_cluster_confusion_matrix(preds, target)
+        >>> calculate_pair_cluster_confusion_matrix(preds, target)
         tensor([[8, 2],
                 [0, 2]])
diff --git a/src/torchmetrics/functional/regression/kl_divergence.py b/src/torchmetrics/functional/regression/kl_divergence.py
index 3aca064cc35..6e6563aee71 100644
--- a/src/torchmetrics/functional/regression/kl_divergence.py
+++ b/src/torchmetrics/functional/regression/kl_divergence.py
@@ -89,7 +89,7 @@ def kl_divergence(

     Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
     over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
-    is a non-symetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+    is a non-symmetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.

     Args:
         p: data distribution with shape ``[N, d]``
diff --git a/src/torchmetrics/functional/text/chrf.py b/src/torchmetrics/functional/text/chrf.py
index 85d62f58a83..81d6e362a49 100644
--- a/src/torchmetrics/functional/text/chrf.py
+++ b/src/torchmetrics/functional/text/chrf.py
@@ -559,7 +559,7 @@ def chrf_score(
             metric is equivalent to the original chrF.
         beta: A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
-        lowercase: An indication whether to enable case-insesitivity.
+        lowercase: An indication whether to enable case-insensitivity.
         whitespace: An indication whether to keep whitespaces during character n-gram extraction.
         return_sentence_level_score: An indication whether a sentence-level chrF/chrF++ score to be returned.

diff --git a/src/torchmetrics/functional/text/ter.py b/src/torchmetrics/functional/text/ter.py
index f57b014909f..256b07e4e86 100644
--- a/src/torchmetrics/functional/text/ter.py
+++ b/src/torchmetrics/functional/text/ter.py
@@ -80,7 +80,7 @@ def __init__(
         Args:
             normalize: An indication whether a general tokenization to be applied.
             no_punctuation: An indication whteher a punctuation to be removed from the sentences.
-            lowercase: An indication whether to enable case-insesitivity.
+            lowercase: An indication whether to enable case-insensitivity.
             asian_support: An indication whether asian characters to be processed.

         """
@@ -551,7 +551,7 @@ def translation_edit_rate(
         target: An iterable of iterables of reference corpus.
         normalize: An indication whether a general tokenization to be applied.
         no_punctuation: An indication whteher a punctuation to be removed from the sentences.
-        lowercase: An indication whether to enable case-insesitivity.
+        lowercase: An indication whether to enable case-insensitivity.
         asian_support: An indication whether asian characters to be processed.
         return_sentence_level_score: An indication whether a sentence-level TER to be returned.
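The docstring corrected above stresses that KL divergence is non-symmetrical, D_KL(P||Q) != D_KL(Q||P). A small numeric sketch of that point using the functional interface; the particular distributions are illustrative only.

```python
# D_KL(P||Q) and D_KL(Q||P) generally differ; both inputs are probability rows of shape [N, d].
import torch
from torchmetrics.functional.regression import kl_divergence

p = torch.tensor([[0.6, 0.3, 0.1]])
q = torch.tensor([[0.2, 0.2, 0.6]])

print(kl_divergence(p, q))  # D_KL(P||Q)
print(kl_divergence(q, p))  # D_KL(Q||P), a different value, illustrating the asymmetry
```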
diff --git a/src/torchmetrics/regression/kl_divergence.py b/src/torchmetrics/regression/kl_divergence.py
index ee797d8c795..7cb3d478fe6 100644
--- a/src/torchmetrics/regression/kl_divergence.py
+++ b/src/torchmetrics/regression/kl_divergence.py
@@ -35,7 +35,7 @@ class KLDivergence(Metric):

     Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
     over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
-    is a non-symetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
+    is a non-symmetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.

     As input to ``forward`` and ``update`` the metric accepts the following input:

diff --git a/src/torchmetrics/text/chrf.py b/src/torchmetrics/text/chrf.py
index 3d109cbc6c5..1ff412ab1a4 100644
--- a/src/torchmetrics/text/chrf.py
+++ b/src/torchmetrics/text/chrf.py
@@ -71,7 +71,7 @@ class CHRFScore(Metric):
         n_word_order: A word n-gram order. If ``n_word_order=2``, the metric refers to the official chrF++.
             If ``n_word_order=0``, the metric is equivalent to the original ChrF.
         beta: parameter determining an importance of recall w.r.t. precision. If ``beta=1``, their importance is equal.
-        lowercase: An indication whether to enable case-insesitivity.
+        lowercase: An indication whether to enable case-insensitivity.
         whitespace: An indication whether keep whitespaces during n-gram extraction.
         return_sentence_level_score: An indication whether a sentence-level chrF/chrF++ score to be returned.
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
diff --git a/src/torchmetrics/text/ter.py b/src/torchmetrics/text/ter.py
index f484e673b18..98ec0a90235 100644
--- a/src/torchmetrics/text/ter.py
+++ b/src/torchmetrics/text/ter.py
@@ -45,7 +45,7 @@ class TranslationEditRate(Metric):
     Args:
         normalize: An indication whether a general tokenization to be applied.
         no_punctuation: An indication whteher a punctuation to be removed from the sentences.
-        lowercase: An indication whether to enable case-insesitivity.
+        lowercase: An indication whether to enable case-insensitivity.
         asian_support: An indication whether asian characters to be processed.
         return_sentence_level_score: An indication whether a sentence-level TER to be returned.
         kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
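The `lowercase` flag documented in the chrF and TER docstrings above switches the comparison to case-insensitive matching. A hedged sketch with `CHRFScore`; the example sentences are illustrative and the exact scores are not claimed here.

```python
# With lowercase=True, casing differences between hypothesis and reference are ignored.
from torchmetrics.text import CHRFScore

preds = ["The Cat Sat On The Mat"]
target = [["the cat sat on the mat"]]

print(CHRFScore(lowercase=False)(preds, target))  # penalized for the casing mismatch
print(CHRFScore(lowercase=True)(preds, target))   # higher score: case-insensitive comparison
```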
diff --git a/src/torchmetrics/utilities/imports.py b/src/torchmetrics/utilities/imports.py
index fa6e8bfea69..e1ffba72a71 100644
--- a/src/torchmetrics/utilities/imports.py
+++ b/src/torchmetrics/utilities/imports.py
@@ -45,8 +45,8 @@
 _TRANSFORMERS_GREATER_EQUAL_4_4: Optional[bool] = compare_version("transformers", operator.ge, "4.4.0")
 _TRANSFORMERS_GREATER_EQUAL_4_10: Optional[bool] = compare_version("transformers", operator.ge, "4.10.0")
 _PESQ_AVAILABLE: bool = package_available("pesq")
-_GAMMATONE_AVAILABEL: bool = package_available("gammatone")
-_TORCHAUDIO_AVAILABEL: bool = package_available("torchaudio")
+_GAMMATONE_AVAILABLE: bool = package_available("gammatone")
+_TORCHAUDIO_AVAILABLE: bool = package_available("torchaudio")
 _TORCHAUDIO_GREATER_EQUAL_0_10: Optional[bool] = compare_version("torchaudio", operator.ge, "0.10.0")
 _SACREBLEU_AVAILABLE: bool = package_available("sacrebleu")
 _REGEX_AVAILABLE: bool = package_available("regex")
diff --git a/tests/unittests/audio/test_sdr.py b/tests/unittests/audio/test_sdr.py
index a44947fb910..2389be0d941 100644
--- a/tests/unittests/audio/test_sdr.py
+++ b/tests/unittests/audio/test_sdr.py
@@ -75,7 +75,7 @@ def _average_metric(preds: Tensor, target: Tensor, metric_func: Callable) -> Ten
     ],
 )
 class TestSDR(MetricTester):
-    """Test class for `SingalDistortionRatio` metric."""
+    """Test class for `SignalDistortionRatio` metric."""

     atol = 1e-2

diff --git a/tests/unittests/clustering/test_utils.py b/tests/unittests/clustering/test_utils.py
index 03550b061c0..6fc8577f15c 100644
--- a/tests/unittests/clustering/test_utils.py
+++ b/tests/unittests/clustering/test_utils.py
@@ -21,10 +21,10 @@
 from sklearn.metrics.cluster import pair_confusion_matrix as sklearn_pair_confusion_matrix
 from sklearn.metrics.cluster._supervised import _generalized_average as sklearn_generalized_average
 from torchmetrics.functional.clustering.utils import (
-    calcualte_pair_cluster_confusion_matrix,
     calculate_contingency_matrix,
     calculate_entropy,
     calculate_generalized_mean,
+    calculate_pair_cluster_confusion_matrix,
 )

 from unittests import BATCH_SIZE, NUM_BATCHES
@@ -113,6 +113,6 @@ class TestPairClusterConfusionMatrix:

     def test_pair_cluster_confusion_matrix(self, preds, target):
         """Check that pair cluster confusion matrix is calculated correctly."""
-        tm_res = calcualte_pair_cluster_confusion_matrix(preds, target)
+        tm_res = calculate_pair_cluster_confusion_matrix(preds, target)
         sklearn_res = sklearn_pair_confusion_matrix(preds, target)
         assert np.allclose(tm_res, sklearn_res, atol=self.atol)
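The unit test above checks the renamed helper against scikit-learn's reference implementation. A standalone sketch of the same parity check, reusing a doctest input from earlier in this diff:

```python
# calculate_pair_cluster_confusion_matrix should agree with sklearn's pair_confusion_matrix.
import numpy as np
import torch
from sklearn.metrics.cluster import pair_confusion_matrix
from torchmetrics.functional.clustering.utils import calculate_pair_cluster_confusion_matrix

preds = torch.tensor([0, 0, 1, 2])
target = torch.tensor([0, 0, 1, 1])

tm_res = calculate_pair_cluster_confusion_matrix(preds, target)  # tensor([[8, 2], [0, 2]])
sk_res = pair_confusion_matrix(preds.numpy(), target.numpy())
assert np.allclose(tm_res.numpy(), sk_res)
```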
diff --git a/tests/unittests/retrieval/helpers.py b/tests/unittests/retrieval/helpers.py
index c50efe11452..66820708207 100644
--- a/tests/unittests/retrieval/helpers.py
+++ b/tests/unittests/retrieval/helpers.py
@@ -29,8 +29,8 @@
 from unittests.retrieval.inputs import _input_retrieval_scores_float_target as _irs_float_tgt
 from unittests.retrieval.inputs import _input_retrieval_scores_for_adaptive_k as _irs_adpt_k
 from unittests.retrieval.inputs import _input_retrieval_scores_int_target as _irs_int_tgt
-from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes as _irs_mis_sz
-from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes_func as _irs_mis_sz_fn
+from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes as _irs_bad_sz
+from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes_func as _irs_bad_sz_fn
 from unittests.retrieval.inputs import _input_retrieval_scores_no_target as _irs_no_tgt
 from unittests.retrieval.inputs import _input_retrieval_scores_with_ignore_index as _irs_ii
 from unittests.retrieval.inputs import _input_retrieval_scores_wrong_targets as _irs_bad_tgt
@@ -136,7 +136,7 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
     "argnames": "preds,target,message,metric_args",
     "argvalues": [
         # check input shapes are consistent (func)
-        (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
+        (_irs_bad_sz_fn.preds, _irs_bad_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
         # check input tensors are not empty
         (_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
         # check on input dtypes
@@ -150,7 +150,7 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
     "argnames": "preds,target,message,metric_args",
     "argvalues": [
         # check input shapes are consistent (func)
-        (_irs_mis_sz_fn.preds, _irs_mis_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
+        (_irs_bad_sz_fn.preds, _irs_bad_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
         # check input tensors are not empty
         (_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
         # check on input dtypes
@@ -224,9 +224,9 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
         ),
         # check input shapes are consistent
         (
-            _irs_mis_sz.indexes,
-            _irs_mis_sz.preds,
-            _irs_mis_sz.target,
+            _irs_bad_sz.indexes,
+            _irs_bad_sz.preds,
+            _irs_bad_sz.target,
             "`indexes`, `preds` and `target` must be of the same shape",
             {"empty_target_action": "skip"},
         ),
@@ -278,9 +278,9 @@ def _concat_tests(*tests: Tuple[Dict]) -> Dict:
         ),
         # check input shapes are consistent
         (
-            _irs_mis_sz.indexes,
-            _irs_mis_sz.preds,
-            _irs_mis_sz.target,
+            _irs_bad_sz.indexes,
+            _irs_bad_sz.preds,
+            _irs_bad_sz.target,
             "`indexes`, `preds` and `target` must be of the same shape",
             {"empty_target_action": "skip"},
         ),
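The renamed `_irs_bad_sz` fixtures above feed error-case tests that expect a shape-mismatch failure with the parametrized message. A hedged standalone sketch of that behaviour; the metric choice, tensor values, and exception type are assumptions, not taken from this diff.

```python
# Mismatching shapes between indexes/preds/target should be rejected with the message
# parametrized in the helpers above; RetrievalMRR is used here only as an example metric.
import pytest
import torch
from torchmetrics.retrieval import RetrievalMRR


def test_mismatching_shapes_raise():
    indexes = torch.tensor([0, 0, 1])
    preds = torch.tensor([0.2, 0.7, 0.4])
    target = torch.tensor([0, 1])  # deliberately one element short
    with pytest.raises(ValueError, match="`indexes`, `preds` and `target` must be of the same shape"):
        RetrievalMRR()(preds, target, indexes=indexes)
```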