diff --git a/.github/actions/push-caches/action.yml b/.github/actions/push-caches/action.yml index 822d5ff4310..f6f79e9b55d 100644 --- a/.github/actions/push-caches/action.yml +++ b/.github/actions/push-caches/action.yml @@ -32,11 +32,11 @@ runs: # run: | # import os # fp = 'requirements.dump' - # with open(fp) as fo: - # lines = [ln.strip() for ln in fo.readlines()] + # with open(fp) as fopen: + # lines = [ln.strip() for ln in fopen.readlines()] # lines = [ln.split('+')[0] for ln in lines if '-e ' not in ln] - # with open(fp, 'w') as fw: - # fw.writelines([ln + os.linesep for ln in lines]) + # with open(fp, 'w') as fwrite: + # fwrite.writelines([ln + os.linesep for ln in lines]) # shell: python - name: Dump wheels diff --git a/.github/workflows/clear-cache.yml b/.github/workflows/clear-cache.yml index bf72e44435f..05364125e69 100644 --- a/.github/workflows/clear-cache.yml +++ b/.github/workflows/clear-cache.yml @@ -4,7 +4,7 @@ on: workflow_dispatch: inputs: pattern: - description: "patter for cleaning cache" + description: "pattern for cleaning cache" default: "pip|conda" required: false type: string diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d865c0281e0..541bd782ae3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -44,6 +44,13 @@ repos: args: [--py38-plus] name: Upgrade code + - repo: https://github.com/codespell-project/codespell + rev: v2.2.5 + hooks: + - id: codespell + additional_dependencies: [tomli] + #args: ["--write-changes"] + - repo: https://github.com/PyCQA/docformatter rev: v1.7.5 hooks: diff --git a/CHANGELOG.md b/CHANGELOG.md index ffa3c30c41c..8336adff876 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -238,7 +238,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed padding removal for 3d input in `MSSSIM` ([#1674](https://github.com/Lightning-AI/torchmetrics/pull/1674)) - Fixed `max_det_threshold` in MAP detection ([#1712](https://github.com/Lightning-AI/torchmetrics/pull/1712)) - Fixed states being saved in metrics that use `register_buffer` ([#1728](https://github.com/Lightning-AI/torchmetrics/pull/1728)) -- Fixed states not being correctly synced and device transfered in `MeanAveragePrecision` for `iou_type="segm"` ([#1763](https://github.com/Lightning-AI/torchmetrics/pull/1763)) +- Fixed states not being correctly synced and device transferred in `MeanAveragePrecision` for `iou_type="segm"` ([#1763](https://github.com/Lightning-AI/torchmetrics/pull/1763)) - Fixed use of `prefix` and `postfix` in nested `MetricCollection` ([#1773](https://github.com/Lightning-AI/torchmetrics/pull/1773)) - Fixed `ax` plotting logging in `MetricCollection ([#1783](https://github.com/Lightning-AI/torchmetrics/pull/1783)) - Fixed lookup for punkt sources being downloaded in `RougeScore` ([#1789](https://github.com/Lightning-AI/torchmetrics/pull/1789)) diff --git a/README.md b/README.md index 4d723d0c064..f4afc50eac8 100644 --- a/README.md +++ b/README.md @@ -201,7 +201,7 @@ def metric_ddp(rank, world_size): acc = metric.compute() print(f"Accuracy on all data: {acc}, accelerator rank: {rank}") - # Reseting internal state such that metric ready for new data + # Resetting internal state such that metric ready for new data metric.reset() # cleanup @@ -278,7 +278,7 @@ acc = torchmetrics.functional.classification.multiclass_accuracy( ### Covered domains and example metrics In total TorchMetrics contains [100+ metrics](https://lightning.ai/docs/torchmetrics/stable/all-metrics.html), which -convers the 
following domains: +covers the following domains: - Audio - Classification @@ -298,7 +298,7 @@ Each domain may require some additional dependencies which can be installed with #### Plotting Visualization of metrics can be important to help understand what is going on with your machine learning algorithms. -Torchmetrics have build-in plotting support (install dependencies with `pip install torchmetrics[visual]`) for nearly +Torchmetrics have built-in plotting support (install dependencies with `pip install torchmetrics[visual]`) for nearly all modular metrics through the `.plot` method. Simply call the method to get a simple visualization of any metric! ```python diff --git a/docs/paper_JOSS/paper.bib b/docs/paper_JOSS/paper.bib index 9003715499a..372b9cf3fbc 100644 --- a/docs/paper_JOSS/paper.bib +++ b/docs/paper_JOSS/paper.bib @@ -66,7 +66,7 @@ @article{scikit_learn @misc{keras, title={Keras}, - author={Chollet, Fran\c{c}ois and others}, + author={Chollet, François and others}, year={2015}, publisher={GitHub}, howpublished={\url{https://github.com/fchollet/keras}}, diff --git a/docs/paper_JOSS/paper.md b/docs/paper_JOSS/paper.md index 6833d974608..39d59911f73 100644 --- a/docs/paper_JOSS/paper.md +++ b/docs/paper_JOSS/paper.md @@ -103,6 +103,6 @@ TorchMetrics is released under the Apache 2.0 license. The source code is availa # Acknowledgement -The TorchMetrics team thanks Thomas Chaton, Ethan Harris, Carlos Mocholí, Sean Narenthiran, Adrian Wälchli, and Ananth Subramaniam for contributing ideas, participating in discussions on API design, and completing Pull Request reviews. We also thank all of our open-source contributors for reporting and resolving issues with this package. We are grateful to the PyTorch Lightning team for their ongoing and dedicated support of this project, and Grid.ai for providing computing resources and cloud credits needed to run our Continuos Integrations. +The TorchMetrics team thanks Thomas Chaton, Ethan Harris, Carlos Mocholí, Sean Narenthiran, Adrian Wälchli, and Ananth Subramaniam for contributing ideas, participating in discussions on API design, and completing Pull Request reviews. We also thank all of our open-source contributors for reporting and resolving issues with this package. We are grateful to the PyTorch Lightning team for their ongoing and dedicated support of this project, and Grid.ai for providing computing resources and cloud credits needed to run our Continuous Integrations. # References diff --git a/docs/source/all-metrics.rst b/docs/source/all-metrics.rst index 9c24937301f..8435cf2ff14 100644 --- a/docs/source/all-metrics.rst +++ b/docs/source/all-metrics.rst @@ -1,4 +1,4 @@ -.. this page is refering other pages with `customcarditem`; bypass hierarchy is patch with redirect +.. 
this page is referring other pages with `customcarditem`; bypass hierarchy is patch with redirect All TorchMetrics ================ diff --git a/docs/source/conf.py b/docs/source/conf.py index d8dde4988f6..3f116bd77d3 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -67,14 +67,14 @@ def _set_root_image_path(page_path: str): """Set relative path to be from the root, drop all `../` in images used gallery.""" - with open(page_path, encoding="UTF-8") as fo: - body = fo.read() + with open(page_path, encoding="UTF-8") as fopen: + body = fopen.read() found = re.findall(r" :image: (.*)\.svg", body) for occur in found: occur_ = occur.replace("../", "") body = body.replace(occur, occur_) - with open(page_path, "w", encoding="UTF-8") as fo: - fo.write(body) + with open(page_path, "w", encoding="UTF-8") as fopen: + fopen.write(body) if SPHINX_FETCH_ASSETS: diff --git a/docs/source/pages/implement.rst b/docs/source/pages/implement.rst index 5c4a6479bce..dd92da371cc 100644 --- a/docs/source/pages/implement.rst +++ b/docs/source/pages/implement.rst @@ -215,7 +215,7 @@ can behave in two ways: 5. Calls ``compute()`` to calculate metric for current batch. 6. Restores the global state. -2. If ``full_state_update`` is ``False`` (default) the metric state of one batch is completly independent of the state +2. If ``full_state_update`` is ``False`` (default) the metric state of one batch is completely independent of the state of other batches, which means that we only need to call ``update`` once. 1. Caches the global state. diff --git a/docs/source/pages/lightning.rst b/docs/source/pages/lightning.rst index ab359b198ae..131251501d1 100644 --- a/docs/source/pages/lightning.rst +++ b/docs/source/pages/lightning.rst @@ -165,7 +165,7 @@ The following contains a list of pitfalls to be aware of: * Modular metrics contain internal states that should belong to only one DataLoader. In case you are using multiple DataLoaders, it is recommended to initialize a separate modular metric instances for each DataLoader and use them separately. The same holds - for using seperate metrics for training, validation and testing. + for using separate metrics for training, validation and testing. .. testcode:: python @@ -194,7 +194,7 @@ The following contains a list of pitfalls to be aware of: * Calling ``self.log("val", self.metric(preds, target))`` with the intention of logging the metric object. Because ``self.metric(preds, target)`` corresponds to calling the forward method, this will return a tensor and not the - metric object. Such logging will be wrong in this case. Instead it is important to seperate into seperate lines: + metric object. Such logging will be wrong in this case. Instead, it is essential to separate into several lines: .. testcode:: python diff --git a/docs/source/pages/overview.rst b/docs/source/pages/overview.rst index 3b3ca95005f..edf1a2d7162 100644 --- a/docs/source/pages/overview.rst +++ b/docs/source/pages/overview.rst @@ -117,7 +117,7 @@ the native `MetricCollection`_ module can also be used to wrap multiple metrics. 
self.metric1 = BinaryAccuracy() self.metric2 = nn.ModuleList(BinaryAccuracy()) self.metric3 = nn.ModuleDict({'accuracy': BinaryAccuracy()}) - self.metric4 = MetricCollection([BinaryAccuracy()]) # torchmetrics build-in collection class + self.metric4 = MetricCollection([BinaryAccuracy()]) # torchmetrics built-in collection class def forward(self, batch): data, target = batch @@ -205,8 +205,8 @@ Most metrics in our collection can be used with 16-bit precision (``torch.half`` the following limitations: * In general ``pytorch`` had better support for 16-bit precision much earlier on GPU than CPU. Therefore, we - recommend that anyone that want to use metrics with half precision on CPU, upgrade to atleast pytorch v1.6 - where support for operations such as addition, subtraction, multiplication ect. was added. + recommend that anyone that want to use metrics with half precision on CPU, upgrade to at least pytorch v1.6 + where support for operations such as addition, subtraction, multiplication etc. was added. * Some metrics does not work at all in half precision on CPU. We have explicitly stated this in their docstring, but they are also listed below: @@ -216,9 +216,9 @@ the following limitations: You can always check the precision/dtype of the metric by checking the `.dtype` property. -****************** -Metric Arithmetics -****************** +***************** +Metric Arithmetic +***************** Metrics support most of python built-in operators for arithmetic, logic and bitwise operations. @@ -484,7 +484,7 @@ argument can help: of GPU. Only applies to metric states that are lists. - ``compute_with_cache``: This argument indicates if the result after calling the ``compute`` method should be cached. - By default this is ``True`` meaning that repeated calls to ``compute`` (with no change to the metric state inbetween) + By default this is ``True`` meaning that repeated calls to ``compute`` (with no change to the metric state in between) does not recompute the metric but just returns the cache. By setting it to ``False`` the metric will be recomputed every time ``compute`` is called, but it can also help clean up a bit of memory. diff --git a/docs/source/pages/plotting.rst b/docs/source/pages/plotting.rst index 40d1031962e..ce811224de2 100644 --- a/docs/source/pages/plotting.rst +++ b/docs/source/pages/plotting.rst @@ -17,7 +17,7 @@ Plotting `Scienceplot package `_ is also installed and all plots in Torchmetrics will default to using that style. -Torchmetrics comes with build-in support for quick visualization of your metrics, by simply using the ``.plot`` method +Torchmetrics comes with built-in support for quick visualization of your metrics, by simply using the ``.plot`` method that all modular metrics implement. This method provides a consistent interface for basic plotting of all metrics. .. code-block:: python @@ -146,7 +146,7 @@ a model over time, we could do it like this: :include-source: false Do note that metrics that do not return simple scalar tensors, such as `ConfusionMatrix`, `ROC` that have specialized -visualzation does not support plotting multiple steps, out of the box and the user needs to manually plot the values +visualization does not support plotting multiple steps, out of the box and the user needs to manually plot the values for each step. ******************************** @@ -235,7 +235,7 @@ to rely on ``MetricTracker`` to keep track of the metrics over multiple steps. 
# Extract all metrics from all steps all_results = tracker.compute_all() - # Constuct a single figure with appropriate layout for all metrics + # Construct a single figure with appropriate layout for all metrics fig = plt.figure(layout="constrained") ax1 = plt.subplot(2, 2, 1) ax2 = plt.subplot(2, 2, 2) @@ -245,7 +245,7 @@ to rely on ``MetricTracker`` to keep track of the metrics over multiple steps. confmat.plot(val=all_results[-1]['BinaryConfusionMatrix'], ax=ax1) roc.plot(all_results[-1]["BinaryROC"], ax=ax2) - # For the remainig we plot the full history, but we need to extract the scalar values from the results + # For the remaining we plot the full history, but we need to extract the scalar values from the results scalar_results = [ {k: v for k, v in ar.items() if isinstance(v, torch.Tensor) and v.numel() == 1} for ar in all_results ] diff --git a/docs/source/pages/quickstart.rst b/docs/source/pages/quickstart.rst index 0ba3c3853e4..f05ac869160 100644 --- a/docs/source/pages/quickstart.rst +++ b/docs/source/pages/quickstart.rst @@ -101,7 +101,7 @@ The code below shows how to use the class-based interface: acc = metric.compute() print(f"Accuracy on all data: {acc}") - # Reseting internal state such that metric ready for new data + # Resetting internal state such that metric ready for new data metric.reset() .. testoutput:: diff --git a/docs/source/references/utilities.rst b/docs/source/references/utilities.rst index 41acbdbcdfc..68660a2959b 100644 --- a/docs/source/references/utilities.rst +++ b/docs/source/references/utilities.rst @@ -59,7 +59,7 @@ dim_zero_sum torchmetrics.utilities.distributed ********************************** -The `distributed` utilities are used to help with syncronization of metrics across multiple processes. +The `distributed` utilities are used to help with synchronization of metrics across multiple processes. 
gather_all_tensors ~~~~~~~~~~~~~~~~~~ diff --git a/pyproject.toml b/pyproject.toml index cc7e635dcd1..63e1a1f16e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,8 +19,7 @@ addopts = [ "--color=yes", "--disable-pytest-warnings", ] -# ToDo -#filterwarnings = ["error::FutureWarning"] +#filterwarnings = ["error::FutureWarning"] # ToDo xfail_strict = true junit_duration_report = "call" @@ -40,10 +39,27 @@ exclude = "(.eggs|.git|.hg|.mypy_cache|.venv|_build|buck-out|build|dist)" [tool.docformatter] recursive = true -wrap-summaries = 120 +# some docstring start with r""" +wrap-summaries = 119 wrap-descriptions = 120 blank = true +[tool.codespell] +#skip = '*.py' +quiet-level = 3 +# Todo: comma separated list of words; waiting for: +# https://github.com/codespell-project/codespell/issues/2839#issuecomment-1731601603 +# Todo: also adding links until they ignored by its: nature +# https://github.com/codespell-project/codespell/issues/2243#issuecomment-1732019960 +ignore-words-list = """ + rouge, \ + mape, \ + wil, \ + fpr, \ + raison, \ + archiv +""" + [tool.ruff] line-length = 120 diff --git a/requirements/image.txt b/requirements/image.txt index 7e02ee2ccb5..c12e2836794 100644 --- a/requirements/image.txt +++ b/requirements/image.txt @@ -3,5 +3,5 @@ scipy >1.0.0, <1.11.0 torchvision >=0.8, <=0.15.2 -torch-fidelity <=0.4.0 # bumping to alow install version from master, now used in testing +torch-fidelity <=0.4.0 # bumping to allow install version from master, now used in testing lpips <=0.1.4 diff --git a/src/torchmetrics/__about__.py b/src/torchmetrics/__about__.py index 83ee50887f4..cef6654df87 100644 --- a/src/torchmetrics/__about__.py +++ b/src/torchmetrics/__about__.py @@ -12,7 +12,7 @@ Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics implemented without having to install Pytorch Lightning (even though we would love for you to try it out). We currently have around 100+ metrics implemented and we continuously are adding more metrics, both within -already covered domains (classification, regression ect.) but also new domains (object detection ect.). +already covered domains (classification, regression etc.) but also new domains (object detection etc.). We make sure that all our metrics are rigorously tested such that you can trust them. """ diff --git a/src/torchmetrics/aggregation.py b/src/torchmetrics/aggregation.py index 2e90d7dfff6..15bc09ef33b 100644 --- a/src/torchmetrics/aggregation.py +++ b/src/torchmetrics/aggregation.py @@ -34,10 +34,10 @@ class BaseAggregator(Metric): fn: string specifying the reduction function default_value: default tensor value to use for the metric state nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value state_name: name of the metric state kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
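For illustration alongside the `nan_strategy` wording fixed in the hunk above, a minimal sketch of how the four documented options behave, assuming the public `torchmetrics.aggregation` API (this example is not part of the patch):

```python
# Illustrative sketch of the `nan_strategy` options documented above.
# Assumes the public torchmetrics.aggregation API; not part of this patch.
import torch
from torchmetrics.aggregation import MeanMetric

values = torch.tensor([1.0, float("nan"), 3.0])

ignore = MeanMetric(nan_strategy="ignore")   # NaN values are silently removed
ignore.update(values)
print(ignore.compute())                      # tensor(2.)

impute = MeanMetric(nan_strategy=0.0)        # NaN values are imputed with 0.0
impute.update(values)
print(impute.compute())                      # tensor(1.3333)

strict = MeanMetric(nan_strategy="error")    # NaN values raise a RuntimeError
try:
    strict.update(values)
except RuntimeError as err:
    print(err)                               # Encountered `nan` values in tensor
```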
@@ -89,10 +89,10 @@ def _cast_and_nan_check_input( weight = torch.ones_like(x) if nans.any() or nans_weight.any(): if self.nan_strategy == "error": - raise RuntimeError("Encounted `nan` values in tensor") + raise RuntimeError("Encountered `nan` values in tensor") if self.nan_strategy in ("ignore", "warn"): if self.nan_strategy == "warn": - rank_zero_warn("Encounted `nan` values in tensor. Will be removed.", UserWarning) + rank_zero_warn("Encountered `nan` values in tensor. Will be removed.", UserWarning) x = x[~(nans | nans_weight)] weight = weight[~(nans | nans_weight)] else: @@ -117,7 +117,7 @@ class MaxMetric(BaseAggregator): As input to ``forward`` and ``update`` the metric accepts the following input - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float values with - arbitary shape ``(...,)``. + arbitrary shape ``(...,)``. As output of `forward` and `compute` the metric returns the following output @@ -125,10 +125,10 @@ class MaxMetric(BaseAggregator): Args: nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -222,7 +222,7 @@ class MinMetric(BaseAggregator): As input to ``forward`` and ``update`` the metric accepts the following input - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float values with - arbitary shape ``(...,)``. + arbitrary shape ``(...,)``. As output of `forward` and `compute` the metric returns the following output @@ -230,10 +230,10 @@ class MinMetric(BaseAggregator): Args: nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -327,7 +327,7 @@ class SumMetric(BaseAggregator): As input to ``forward`` and ``update`` the metric accepts the following input - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float values with - arbitary shape ``(...,)``. + arbitrary shape ``(...,)``. 
As output of `forward` and `compute` the metric returns the following output @@ -335,10 +335,10 @@ class SumMetric(BaseAggregator): Args: nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -432,7 +432,7 @@ class CatMetric(BaseAggregator): As input to ``forward`` and ``update`` the metric accepts the following input - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float values with - arbitary shape ``(...,)``. + arbitrary shape ``(...,)``. As output of `forward` and `compute` the metric returns the following output @@ -440,10 +440,10 @@ class CatMetric(BaseAggregator): Args: nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -496,9 +496,9 @@ class MeanMetric(BaseAggregator): As input to ``forward`` and ``update`` the metric accepts the following input - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float values with - arbitary shape ``(...,)``. + arbitrary shape ``(...,)``. - ``weight`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float value with - arbitary shape ``(...,)``. Needs to be broadcastable with the shape of ``value`` tensor. + arbitrary shape ``(...,)``. Needs to be broadcastable with the shape of ``value`` tensor. As output of `forward` and `compute` the metric returns the following output @@ -506,10 +506,10 @@ class MeanMetric(BaseAggregator): Args: nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -623,7 +623,7 @@ class RunningMean(Running): As input to ``forward`` and ``update`` the metric accepts the following input - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float values with - arbitary shape ``(...,)``. + arbitrary shape ``(...,)``. 
As output of `forward` and `compute` the metric returns the following output @@ -632,10 +632,10 @@ class RunningMean(Running): Args: window: The size of the running window. nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. @@ -680,7 +680,7 @@ class RunningSum(Running): As input to ``forward`` and ``update`` the metric accepts the following input - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or an tensor of float values with - arbitary shape ``(...,)``. + arbitrary shape ``(...,)``. As output of `forward` and `compute` the metric returns the following output @@ -689,10 +689,10 @@ class RunningSum(Running): Args: window: The size of the running window. nan_strategy: options: - - ``'error'``: if any `nan` values are encounted will give a RuntimeError - - ``'warn'``: if any `nan` values are encounted will give a warning and continue + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue - ``'ignore'``: all `nan` values are silently removed - - a float: if a float is provided will impude any `nan` values with this value + - a float: if a float is provided will impute any `nan` values with this value kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. diff --git a/src/torchmetrics/audio/pesq.py b/src/torchmetrics/audio/pesq.py index 534a30f203b..8a5554a61df 100644 --- a/src/torchmetrics/audio/pesq.py +++ b/src/torchmetrics/audio/pesq.py @@ -30,7 +30,7 @@ class PerceptualEvaluationSpeechQuality(Metric): """Calculate `Perceptual Evaluation of Speech Quality`_ (PESQ). It's a recognized industry standard for audio quality that takes into considerations characteristics such as: - audio sharpness, call volume, background noise, clipping, audio interference ect. PESQ returns a score between + audio sharpness, call volume, background noise, clipping, audio interference etc. PESQ returns a score between -0.5 and 4.5 with the higher scores indicating a better quality. This metric is a wrapper for the `pesq package`_. Note that input will be moved to ``cpu`` to perform the metric @@ -54,7 +54,7 @@ class PerceptualEvaluationSpeechQuality(Metric): fs: sampling frequency, should be 16000 or 8000 (Hz) mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band) keep_same_device: whether to move the pesq value to the device of preds - n_processes: integer specifiying the number of processes to run in parallel for the metric calculation. + n_processes: integer specifying the number of processes to run in parallel for the metric calculation. Only applies to batches of data and if ``multiprocessing`` package is installed. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
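For reference, the PESQ arguments touched in the hunk above (`fs`, `mode`, `n_processes`) are used roughly as follows; a hedged sketch that assumes the optional `pesq` backend is installed and is not part of the patch itself:

```python
# Hedged usage sketch for the PESQ wrapper whose docstring is fixed above.
# Assumes the optional backend is installed: pip install torchmetrics[audio]
import torch
from torchmetrics.audio import PerceptualEvaluationSpeechQuality

torch.manual_seed(1)
preds = torch.randn(8000)    # degraded speech, 1 s at fs=8000
target = torch.randn(8000)   # reference signal of the same length

nb_pesq = PerceptualEvaluationSpeechQuality(fs=8000, mode="nb", n_processes=1)
score = nb_pesq(preds, target)   # scalar tensor in the range [-0.5, 4.5]
```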
diff --git a/src/torchmetrics/classification/accuracy.py b/src/torchmetrics/classification/accuracy.py index 8deb4fe5544..8f6278f4a39 100644 --- a/src/torchmetrics/classification/accuracy.py +++ b/src/torchmetrics/classification/accuracy.py @@ -40,7 +40,7 @@ class BinaryAccuracy(BinaryStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` As output to ``forward`` and ``compute`` the metric returns the following output: @@ -177,7 +177,7 @@ class MulticlassAccuracy(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -307,7 +307,7 @@ class MultilabelAccuracy(MultilabelStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per - element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` As output to ``forward`` and ``compute`` the metric returns the following output: @@ -326,7 +326,7 @@ class MultilabelAccuracy(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/classification/auroc.py b/src/torchmetrics/classification/auroc.py index 950f8f34f9d..e8973ca226b 100644 --- a/src/torchmetrics/classification/auroc.py +++ b/src/torchmetrics/classification/auroc.py @@ -173,7 +173,7 @@ class MulticlassAUROC(MulticlassPrecisionRecallCurve): corresponds to random guessing. For multiclass the metric is calculated by iteratively treating each class as the positive class and all other - classes as the negative, which is refered to as the one-vs-rest approach. One-vs-one is currently not supported by + classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by this metric. By default the reported metric is then the average over all classes, but this behavior can be changed by setting the ``average`` argument. @@ -199,7 +199,7 @@ class MulticlassAUROC(MulticlassPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over classes. 
Should be one of the following: @@ -346,7 +346,7 @@ class MultilabelAUROC(MultilabelPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/classification/average_precision.py b/src/torchmetrics/classification/average_precision.py index e579cfde173..ae26d212a51 100644 --- a/src/torchmetrics/classification/average_precision.py +++ b/src/torchmetrics/classification/average_precision.py @@ -172,7 +172,7 @@ class MulticlassAveragePrecision(MulticlassPrecisionRecallCurve): equivalent to the area under the precision-recall curve (AUPRC). For multiclass the metric is calculated by iteratively treating each class as the positive class and all other - classes as the negative, which is refered to as the one-vs-rest approach. One-vs-one is currently not supported by + classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by this metric. By default the reported metric is then the average over all classes, but this behavior can be changed by setting the ``average`` argument. @@ -198,7 +198,7 @@ class MulticlassAveragePrecision(MulticlassPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over classes. Should be one of the following: @@ -349,7 +349,7 @@ class MultilabelAveragePrecision(MultilabelPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/classification/calibration_error.py b/src/torchmetrics/classification/calibration_error.py index 26a98f7ea90..d939bf99daa 100644 --- a/src/torchmetrics/classification/calibration_error.py +++ b/src/torchmetrics/classification/calibration_error.py @@ -221,7 +221,7 @@ class MulticlassCalibrationError(Metric): - ``mcce`` (:class:`~torch.Tensor`): A scalar tensor containing the calibration error Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes n_bins: Number of bins to use when computing the metric. norm: Norm used to compare empirical and expected probability bins. ignore_index: diff --git a/src/torchmetrics/classification/cohen_kappa.py b/src/torchmetrics/classification/cohen_kappa.py index fb73f8cf408..213fa261ea3 100644 --- a/src/torchmetrics/classification/cohen_kappa.py +++ b/src/torchmetrics/classification/cohen_kappa.py @@ -47,7 +47,7 @@ class labels. - ``preds`` (:class:`~torch.Tensor`): A int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per element. - Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. .. 
note:: @@ -182,7 +182,7 @@ class labels. - ``mcck`` (:class:`~torch.Tensor`): A tensor containing cohen kappa score Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation weights: Weighting type to calculate the score. Choose from: diff --git a/src/torchmetrics/classification/confusion_matrix.py b/src/torchmetrics/classification/confusion_matrix.py index 42f58c2a72f..b01007cfd94 100644 --- a/src/torchmetrics/classification/confusion_matrix.py +++ b/src/torchmetrics/classification/confusion_matrix.py @@ -66,7 +66,7 @@ class BinaryConfusionMatrix(Metric): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per - element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. As output to ``forward`` and ``compute`` the metric returns the following output: @@ -202,7 +202,7 @@ class MulticlassConfusionMatrix(Metric): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per - element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. As output to ``forward`` and ``compute`` the metric returns the following output: @@ -210,7 +210,7 @@ class MulticlassConfusionMatrix(Metric): - ``confusion_matrix``: [num_classes, num_classes] matrix Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation normalize: Normalization mode for confusion matrix. Choose from: @@ -342,7 +342,7 @@ class MultilabelConfusionMatrix(Metric): As input to 'update' the metric accepts the following input: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. 
- ``target`` (int tensor): ``(N, C, ...)`` @@ -351,7 +351,7 @@ class MultilabelConfusionMatrix(Metric): - ``confusion matrix``: [num_labels,2,2] matrix Args: - num_classes: Integer specifing the number of labels + num_classes: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation diff --git a/src/torchmetrics/classification/dice.py b/src/torchmetrics/classification/dice.py index 650d0824ae6..98317134859 100644 --- a/src/torchmetrics/classification/dice.py +++ b/src/torchmetrics/classification/dice.py @@ -232,7 +232,7 @@ def update(self, preds: Tensor, target: Tensor) -> None: @no_type_check def _get_final_stats(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - """Perform concatenation on the stat scores if neccesary, before passing them to a compute function.""" + """Perform concatenation on the stat scores if necessary, before passing them to a compute function.""" tp = torch.cat(self.tp) if isinstance(self.tp, list) else self.tp fp = torch.cat(self.fp) if isinstance(self.fp, list) else self.fp tn = torch.cat(self.tn) if isinstance(self.tn, list) else self.tn diff --git a/src/torchmetrics/classification/exact_match.py b/src/torchmetrics/classification/exact_match.py index 3d9bf724ccc..481441a820c 100644 --- a/src/torchmetrics/classification/exact_match.py +++ b/src/torchmetrics/classification/exact_match.py @@ -63,7 +63,7 @@ class MulticlassExactMatch(Metric): - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` Args: - num_classes: Integer specifing the number of labels + num_classes: Integer specifying the number of labels multidim_average: Defines how additionally dimensions ``...`` should be handled. Should be one of the following: @@ -203,7 +203,7 @@ class MultilabelExactMatch(Metric): - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ..)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply - sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. @@ -215,7 +215,7 @@ class MultilabelExactMatch(Metric): - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions multidim_average: Defines how additionally dimensions ``...`` should be handled. Should be one of the following: diff --git a/src/torchmetrics/classification/f_beta.py b/src/torchmetrics/classification/f_beta.py index 1be071a97b4..16f8a1408b4 100644 --- a/src/torchmetrics/classification/f_beta.py +++ b/src/torchmetrics/classification/f_beta.py @@ -55,7 +55,7 @@ class BinaryFBetaScore(BinaryStatScores): - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. 
Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. As output to ``forward`` and ``compute`` the metric returns the following output: @@ -220,7 +220,7 @@ class MulticlassFBetaScore(MulticlassStatScores): Args: beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -379,7 +379,7 @@ class MultilabelFBetaScore(MultilabelStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. @@ -400,7 +400,7 @@ class MultilabelFBetaScore(MultilabelStatScores): Args: beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: @@ -555,7 +555,7 @@ class BinaryF1Score(BinaryFBetaScore): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per - element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` As output to ``forward`` and ``compute`` the metric returns the following output: @@ -709,7 +709,7 @@ class MulticlassF1Score(MulticlassFBetaScore): Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -857,7 +857,7 @@ class MultilabelF1Score(MultilabelFBetaScore): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and - will auto apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value + will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. 
@@ -877,7 +877,7 @@ class MultilabelF1Score(MultilabelFBetaScore): - If ``average=None/'none'``, the shape will be ``(N, C)``` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/classification/group_fairness.py b/src/torchmetrics/classification/group_fairness.py index 9e46dd36891..4ddd06dc39d 100644 --- a/src/torchmetrics/classification/group_fairness.py +++ b/src/torchmetrics/classification/group_fairness.py @@ -64,7 +64,7 @@ class BinaryGroupStatRates(_AbstractGroupStatScores): Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)``. - ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``. @@ -159,7 +159,7 @@ class BinaryFairness(_AbstractGroupStatScores): Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``. - ``target`` (int tensor): ``(N, ...)``. diff --git a/src/torchmetrics/classification/hamming.py b/src/torchmetrics/classification/hamming.py index db32bdca389..dd577d92b76 100644 --- a/src/torchmetrics/classification/hamming.py +++ b/src/torchmetrics/classification/hamming.py @@ -46,7 +46,7 @@ class BinaryHammingDistance(BinaryStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per - element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. @@ -188,7 +188,7 @@ class MulticlassHammingDistance(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -320,7 +320,7 @@ class MultilabelHammingDistance(MultilabelStatScores): - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto - apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in + apply sigmoid per element. 
Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. @@ -341,7 +341,7 @@ class MultilabelHammingDistance(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/classification/hinge.py b/src/torchmetrics/classification/hinge.py index b6cb059f7f4..0530fd60c81 100644 --- a/src/torchmetrics/classification/hinge.py +++ b/src/torchmetrics/classification/hinge.py @@ -196,7 +196,7 @@ class MulticlassHingeLoss(Metric): - ``mchl`` (:class:`~torch.Tensor`): A tensor containing the multi-class hinge loss. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes squared: If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss. multiclass_mode: diff --git a/src/torchmetrics/classification/jaccard.py b/src/torchmetrics/classification/jaccard.py index c82f011edb2..a4f07febb34 100644 --- a/src/torchmetrics/classification/jaccard.py +++ b/src/torchmetrics/classification/jaccard.py @@ -49,7 +49,7 @@ class BinaryJaccardIndex(BinaryConfusionMatrix): - ``preds`` (:class:`~torch.Tensor`): A int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per element. - Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. .. note:: @@ -173,7 +173,7 @@ class MulticlassJaccardIndex(MulticlassConfusionMatrix): - ``mcji`` (:class:`~torch.Tensor`): A tensor containing the Multi-class Jaccard Index. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation average: @@ -292,7 +292,7 @@ class MultilabelJaccardIndex(MultilabelConfusionMatrix): - ``preds`` (:class:`~torch.Tensor`): A int tensor or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply - sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` .. note:: @@ -303,7 +303,7 @@ class MultilabelJaccardIndex(MultilabelConfusionMatrix): - ``mlji`` (:class:`~torch.Tensor`): A tensor containing the Multi-label Jaccard Index loss. 
Args: - num_classes: Integer specifing the number of labels + num_classes: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation diff --git a/src/torchmetrics/classification/matthews_corrcoef.py b/src/torchmetrics/classification/matthews_corrcoef.py index 6a030be950f..fff62187377 100644 --- a/src/torchmetrics/classification/matthews_corrcoef.py +++ b/src/torchmetrics/classification/matthews_corrcoef.py @@ -45,7 +45,7 @@ class BinaryMatthewsCorrCoef(BinaryConfusionMatrix): - ``preds`` (:class:`~torch.Tensor`): A int tensor or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` .. note:: @@ -164,7 +164,7 @@ class MulticlassMatthewsCorrCoef(MulticlassConfusionMatrix): - ``mcmcc`` (:class:`~torch.Tensor`): A tensor containing the Multi-class Matthews Correlation Coefficient. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. @@ -265,7 +265,7 @@ class MultilabelMatthewsCorrCoef(MultilabelConfusionMatrix): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` .. note:: @@ -276,7 +276,7 @@ class MultilabelMatthewsCorrCoef(MultilabelConfusionMatrix): - ``mlmcc`` (:class:`~torch.Tensor`): A tensor containing the Multi-label Matthews Correlation Coefficient. Args: - num_classes: Integer specifing the number of labels + num_classes: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation diff --git a/src/torchmetrics/classification/precision_fixed_recall.py b/src/torchmetrics/classification/precision_fixed_recall.py index 039392f3a1b..ebaed8896fd 100644 --- a/src/torchmetrics/classification/precision_fixed_recall.py +++ b/src/torchmetrics/classification/precision_fixed_recall.py @@ -210,7 +210,7 @@ class MulticlassPrecisionAtFixedRecall(MulticlassPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes min_recall: float value specifying minimum recall threshold. 
thresholds: Can be one of: @@ -354,7 +354,7 @@ class MultilabelPrecisionAtFixedRecall(MultilabelPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels min_recall: float value specifying minimum recall threshold. thresholds: Can be one of: diff --git a/src/torchmetrics/classification/precision_recall.py b/src/torchmetrics/classification/precision_recall.py index 37adf6fcbb8..d3530b4c769 100644 --- a/src/torchmetrics/classification/precision_recall.py +++ b/src/torchmetrics/classification/precision_recall.py @@ -48,7 +48,7 @@ class BinaryPrecision(BinaryStatScores): - ``preds`` (:class:`~torch.Tensor`): A int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per - element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. As output to ``forward`` and ``compute`` the metric returns the following output: @@ -188,7 +188,7 @@ class MulticlassPrecision(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -321,7 +321,7 @@ class MultilabelPrecision(MultilabelStatScores): - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and - will auto apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value + will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. @@ -341,7 +341,7 @@ class MultilabelPrecision(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: @@ -470,7 +470,7 @@ class BinaryRecall(BinaryStatScores): - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply - sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. 
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` As output to ``forward`` and ``compute`` the metric returns the following output: @@ -609,7 +609,7 @@ class MulticlassRecall(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -742,7 +742,7 @@ class MultilabelRecall(MultilabelStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` As output to ``forward`` and ``compute`` the metric returns the following output: @@ -761,7 +761,7 @@ class MultilabelRecall(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/classification/precision_recall_curve.py b/src/torchmetrics/classification/precision_recall_curve.py index 0bbd00fa05f..25f4025f52f 100644 --- a/src/torchmetrics/classification/precision_recall_curve.py +++ b/src/torchmetrics/classification/precision_recall_curve.py @@ -225,7 +225,7 @@ class MulticlassPrecisionRecallCurve(Metric): tradeoff between the two values can been seen. For multiclass the metric is calculated by iteratively treating each class as the positive class and all other - classes as the negative, which is refered to as the one-vs-rest approach. One-vs-one is currently not supported by + classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by this metric. As input to ``forward`` and ``update`` the metric accepts the following input: @@ -254,7 +254,7 @@ class MulticlassPrecisionRecallCurve(Metric): size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes thresholds: Can be one of: @@ -444,7 +444,7 @@ class MultilabelPrecisionRecallCurve(Metric): Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels thresholds: Can be one of: diff --git a/src/torchmetrics/classification/ranking.py b/src/torchmetrics/classification/ranking.py index 2a104641636..dd58022197b 100644 --- a/src/torchmetrics/classification/ranking.py +++ b/src/torchmetrics/classification/ranking.py @@ -59,7 +59,7 @@ class MultilabelCoverageError(Metric): - ``mlce`` (:class:`~torch.Tensor`): A tensor containing the multilabel coverage error. 
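To make the expected shapes above concrete, here is a minimal usage sketch for the `MultilabelCoverageError` module just described (a sketch assuming a recent torchmetrics release; the tensors are random and purely illustrative):

```python
import torch
from torchmetrics.classification import MultilabelCoverageError

# 4 samples, 5 labels: float preds are per-label scores, target holds 0/1 ground truth
preds = torch.rand(4, 5)
target = torch.randint(0, 2, (4, 5))

metric = MultilabelCoverageError(num_labels=5)
metric.update(preds, target)
print(metric.compute())  # scalar tensor with the multilabel coverage error
```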
Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. @@ -179,7 +179,7 @@ class MultilabelRankingAveragePrecision(Metric): - ``mlrap`` (:class:`~torch.Tensor`): A tensor containing the multilabel ranking average precision. Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. @@ -301,7 +301,7 @@ class MultilabelRankingLoss(Metric): Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. diff --git a/src/torchmetrics/classification/recall_fixed_precision.py b/src/torchmetrics/classification/recall_fixed_precision.py index 6f00df004af..bfbf5d68cd9 100644 --- a/src/torchmetrics/classification/recall_fixed_precision.py +++ b/src/torchmetrics/classification/recall_fixed_precision.py @@ -181,7 +181,7 @@ class MulticlassRecallAtFixedPrecision(MulticlassPrecisionRecallCurve): a given precision level. For multiclass the metric is calculated by iteratively treating each class as the positive class and all other - classes as the negative, which is refered to as the one-vs-rest approach. One-vs-one is currently not supported by + classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by this metric. As input to ``forward`` and ``update`` the metric accepts the following input: @@ -211,7 +211,7 @@ class MulticlassRecallAtFixedPrecision(MulticlassPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes min_precision: float value specifying minimum precision threshold. thresholds: Can be one of: @@ -353,7 +353,7 @@ class MultilabelRecallAtFixedPrecision(MultilabelPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels min_precision: float value specifying minimum precision threshold. thresholds: Can be one of: diff --git a/src/torchmetrics/classification/roc.py b/src/torchmetrics/classification/roc.py index 7ae73b1fe56..a76c3ebc02a 100644 --- a/src/torchmetrics/classification/roc.py +++ b/src/torchmetrics/classification/roc.py @@ -175,7 +175,7 @@ class MulticlassROC(MulticlassPrecisionRecallCurve): different thresholds, such that the tradeoff between the two values can be seen. For multiclass the metric is calculated by iteratively treating each class as the positive class and all other - classes as the negative, which is refered to as the one-vs-rest approach. One-vs-one is currently not supported by + classes as the negative, which is referred to as the one-vs-rest approach. 
One-vs-one is currently not supported by this metric. As input to ``forward`` and ``update`` the metric accepts the following input: @@ -217,7 +217,7 @@ class MulticlassROC(MulticlassPrecisionRecallCurve): and tpr which are sorted in reversed order during their calculation, such that they are monotome increasing. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes thresholds: Can be one of: @@ -369,7 +369,7 @@ class MultilabelROC(MultilabelPrecisionRecallCurve): which are sorted in reversed order during their calculation, such that they are monotome increasing. Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels thresholds: Can be one of: diff --git a/src/torchmetrics/classification/specificity.py b/src/torchmetrics/classification/specificity.py index 5a86fff125f..31d736881cf 100644 --- a/src/torchmetrics/classification/specificity.py +++ b/src/torchmetrics/classification/specificity.py @@ -41,7 +41,7 @@ class BinarySpecificity(BinaryStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per - element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` As output to ``forward`` and ``compute`` the metric returns the following output: @@ -175,7 +175,7 @@ class MulticlassSpecificity(MulticlassStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -304,7 +304,7 @@ class MultilabelSpecificity(MultilabelStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` @@ -324,7 +324,7 @@ class MultilabelSpecificity(MultilabelStatScores): - If ``average=None/'none'``, the shape will be ``(N, C)`` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/classification/specificity_sensitivity.py b/src/torchmetrics/classification/specificity_sensitivity.py index 42429bf90fd..c199f4dc7ad 100644 --- a/src/torchmetrics/classification/specificity_sensitivity.py +++ b/src/torchmetrics/classification/specificity_sensitivity.py @@ -44,7 +44,7 @@ class BinarySpecificityAtSensitivity(BinaryPrecisionRecallCurve): - r"""Compute the higest possible specificity value given the minimum sensitivity thresholds provided. 
+ r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. @@ -128,13 +128,13 @@ def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override] class MulticlassSpecificityAtSensitivity(MulticlassPrecisionRecallCurve): - r"""Compute the higest possible specificity value given the minimum sensitivity thresholds provided. + r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. For multiclass the metric is calculated by iteratively treating each class as the positive class and all other - classes as the negative, which is refered to as the one-vs-rest approach. One-vs-one is currently not supported by + classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by this metric. Accepts the following input tensors: @@ -154,7 +154,7 @@ class MulticlassSpecificityAtSensitivity(MulticlassPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes min_sensitivity: float value specifying minimum sensitivity threshold. thresholds: Can be one of: @@ -230,7 +230,7 @@ def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override] class MultilabelSpecificityAtSensitivity(MultilabelPrecisionRecallCurve): - r"""Compute the higest possible specificity value given the minimum sensitivity thresholds provided. + r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. @@ -252,7 +252,7 @@ class MultilabelSpecificityAtSensitivity(MultilabelPrecisionRecallCurve): size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels min_sensitivity: float value specifying minimum sensitivity threshold. thresholds: Can be one of: @@ -328,7 +328,7 @@ def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override] class SpecificityAtSensitivity(_ClassificationTaskWrapper): - r"""Compute the higest possible specificity value given the minimum sensitivity thresholds provided. + r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. diff --git a/src/torchmetrics/classification/stat_scores.py b/src/torchmetrics/classification/stat_scores.py index c35fa452142..7c72725d57a 100644 --- a/src/torchmetrics/classification/stat_scores.py +++ b/src/torchmetrics/classification/stat_scores.py @@ -97,7 +97,7 @@ class BinaryStatScores(_AbstractStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. 
If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` @@ -216,7 +216,7 @@ class MulticlassStatScores(_AbstractStatScores): - If ``average=None/'none'``, the shape will be ``(N, C, 5)`` Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -343,7 +343,7 @@ class MultilabelStatScores(_AbstractStatScores): - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid - per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` As output to ``forward`` and ``compute`` the metric returns the following output: @@ -360,7 +360,7 @@ class MultilabelStatScores(_AbstractStatScores): - If ``average=None/'none'``, the shape will be ``(N, C, 5)`` Args: - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/clustering/adjusted_rand_score.py b/src/torchmetrics/clustering/adjusted_rand_score.py index 870c03eba35..9df67a8d802 100644 --- a/src/torchmetrics/clustering/adjusted_rand_score.py +++ b/src/torchmetrics/clustering/adjusted_rand_score.py @@ -32,7 +32,7 @@ class AdjustedRandScore(Metric): ARS(U, V) = (\text{RS} - \text{Expected RS}) / (\text{Max RS} - \text{Expected RS}) The adjusted rand score :math:`\text{ARS}` is in essence the :math:`\text{RS}` (rand score) adjusted for chance. - The score ensures that completly randomly cluster labels have a score close to zero and only a perfect match will + The score ensures that completely randomly cluster labels have a score close to zero and only a perfect match will have a score of 1 (up to a permutation of the labels). The adjusted rand score is symmetric, therefore swapping :math:`U` and :math:`V` yields the same adjusted rand score. diff --git a/src/torchmetrics/collections.py b/src/torchmetrics/collections.py index bb4eaae4f4a..3a5650b0cae 100644 --- a/src/torchmetrics/collections.py +++ b/src/torchmetrics/collections.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# this is just a bypass for this module name collision with build-in one +# this is just a bypass for this module name collision with built-in one from collections import OrderedDict from copy import deepcopy from typing import Any, Dict, Hashable, Iterable, Iterator, List, Optional, Sequence, Tuple, Union @@ -60,7 +60,7 @@ class name as key for the output dict. this behaviour. 
Can also be set to a list of lists of metrics for setting the compute groups yourself. .. note:: - The compute groups feature can significatly speedup the calculation of metrics under the right conditions. + The compute groups feature can significantly speedup the calculation of metrics under the right conditions. First, the feature is only available when calling the ``update`` method and not when calling ``forward`` method due to the internal logic of ``forward`` preventing this. Secondly, since we compute groups share metric states by reference, calling ``.items()``, ``.values()`` etc. on the metric collection will break this @@ -68,7 +68,7 @@ class name as key for the output dict. call to ``update``). .. note:: - Metric collections can be nested at initilization (see last example) but the output of the collection will + Metric collections can be nested at initialization (see last example) but the output of the collection will still be a single flatten dictionary combining the prefix and postfix arguments from the nested collection. Raises: @@ -211,7 +211,7 @@ def update(self, *args: Any, **kwargs: Any) -> None: m0 = getattr(self, cg[0]) m0.update(*args, **m0._filter_kwargs(**kwargs)) if self._state_is_copy: - # If we have deep copied state inbetween updates, reestablish link + # If we have deep copied state in between updates, reestablish link self._compute_groups_create_state_ref() self._state_is_copy = False else: # the first update always do per metric to form compute groups @@ -592,11 +592,11 @@ def plot( ax: Either a single instance of matplotlib axis object or an sequence of matplotlib axis objects. If provided, will add the plots to the provided axis objects. If not provided, will create a new. If argument `together` is set to `True`, a single object is expected. If `together` is set to `False`, - the number of axis objects needs to be the same lenght as the number of metrics in the collection. + the number of axis objects needs to be the same length as the number of metrics in the collection. together: If `True`, will plot all metrics in the same axis. If `False`, will plot each metric in a separate Returns: - Either instal tupel of Figure and Axes object or an sequence of tuples with Figure and Axes object for each + Either install tuple of Figure and Axes object or an sequence of tuples with Figure and Axes object for each metric in the collection. 
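As a concrete illustration of the compute-group behaviour discussed in this hunk, a minimal sketch of the usual `MetricCollection` pattern (the metrics named are real torchmetrics classes; the data is random and purely illustrative):

```python
import torch
from torchmetrics import MetricCollection
from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision, MulticlassRecall

# Accuracy, precision and recall are all derived from the same stat-score style state,
# so they can share it when compute_groups is enabled (the default).
collection = MetricCollection(
    MulticlassAccuracy(num_classes=3),
    MulticlassPrecision(num_classes=3),
    MulticlassRecall(num_classes=3),
    compute_groups=True,
)

preds = torch.randn(10, 3).softmax(dim=-1)
target = torch.randint(0, 3, (10,))

collection.update(preds, target)  # state is updated once and shared within the compute group
print(collection.compute())       # dict with one entry per metric
```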
Raises: diff --git a/src/torchmetrics/detection/_mean_ap.py b/src/torchmetrics/detection/_mean_ap.py index bd9717ff73a..7ff44165b8c 100644 --- a/src/torchmetrics/detection/_mean_ap.py +++ b/src/torchmetrics/detection/_mean_ap.py @@ -827,13 +827,13 @@ def __calculate_recall_precision_scores( tp_sum = _cumsum(tps, dim=1, dtype=torch.float) fp_sum = _cumsum(fps, dim=1, dtype=torch.float) for idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): - nd = len(tp) + tp_len = len(tp) rc = tp / npig pr = tp / (fp + tp + torch.finfo(torch.float64).eps) prec = torch.zeros((num_rec_thrs,)) score = torch.zeros((num_rec_thrs,)) - recall[idx, idx_cls, idx_bbox_area, idx_max_det_thrs] = rc[-1] if nd else 0 + recall[idx, idx_cls, idx_bbox_area, idx_max_det_thrs] = rc[-1] if tp_len else 0 # Remove zigzags for AUC diff_zero = torch.zeros((1,), device=pr.device) @@ -843,7 +843,7 @@ def __calculate_recall_precision_scores( pr += diff inds = torch.searchsorted(rc, rec_thresholds.to(rc.device), right=False) - num_inds = inds.argmax() if inds.max() >= nd else num_rec_thrs + num_inds = inds.argmax() if inds.max() >= tp_len else num_rec_thrs inds = inds[:num_inds] prec[:num_inds] = pr[inds] score[:num_inds] = det_scores_sorted[inds] diff --git a/src/torchmetrics/detection/mean_ap.py b/src/torchmetrics/detection/mean_ap.py index 9e60b2aa867..6cc80a62317 100644 --- a/src/torchmetrics/detection/mean_ap.py +++ b/src/torchmetrics/detection/mean_ap.py @@ -987,7 +987,7 @@ def plot( return self._plot(val, ax) # -------------------- - # specialized syncronization and apply functions for this metric + # specialized synchronization and apply functions for this metric # -------------------- def _apply(self, fn: Callable) -> torch.nn.Module: # type: ignore[override] diff --git a/src/torchmetrics/detection/panoptic_qualities.py b/src/torchmetrics/detection/panoptic_qualities.py index b30dcd00c10..5ad84ddd974 100644 --- a/src/torchmetrics/detection/panoptic_qualities.py +++ b/src/torchmetrics/detection/panoptic_qualities.py @@ -40,7 +40,7 @@ class PanopticQuality(Metric): PQ = \frac{IOU}{TP + 0.5 FP + 0.5 FN} where IOU, TP, FP and FN are respectively the sum of the intersection over union for true positives, - the number of true postitives, false positives and false negatives. This metric is inspired by the PQ + the number of true positives, false positives and false negatives. This metric is inspired by the PQ implementation of panopticapi, a standard implementation for the PQ metric for panoptic segmentation. .. note: diff --git a/src/torchmetrics/functional/audio/pesq.py b/src/torchmetrics/functional/audio/pesq.py index 76ad0782471..d4a4e7086e0 100644 --- a/src/torchmetrics/functional/audio/pesq.py +++ b/src/torchmetrics/functional/audio/pesq.py @@ -38,7 +38,7 @@ def perceptual_evaluation_speech_quality( r"""Calculate `Perceptual Evaluation of Speech Quality`_ (PESQ). It's a recognized industry standard for audio quality that takes into considerations characteristics such as: audio - sharpness, call volume, background noise, clipping, audio interference ect. PESQ returns a score between -0.5 and + sharpness, call volume, background noise, clipping, audio interference etc. PESQ returns a score between -0.5 and 4.5 with the higher scores indicating a better quality. This metric is a wrapper for the `pesq package`_. 
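For the PESQ wrapper touched here, a minimal hedged usage sketch (assumes the optional `pesq` backend is installed, e.g. via `pip install torchmetrics[audio]`; the random waveforms only demonstrate the call signature, not a meaningful score):

```python
import torch
from torchmetrics.functional.audio import perceptual_evaluation_speech_quality

fs = 16000                # PESQ supports 16000 Hz (wide-band) or 8000 Hz (narrow-band)
preds = torch.randn(fs)   # one second of "degraded" audio
target = torch.randn(fs)  # one second of reference audio

score = perceptual_evaluation_speech_quality(preds, target, fs, "wb")
print(score)              # tensor roughly in [-0.5, 4.5]; higher means better quality
```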
Note that input will be moved to `cpu` to perform the metric @@ -55,7 +55,7 @@ def perceptual_evaluation_speech_quality( fs: sampling frequency, should be 16000 or 8000 (Hz) mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band) keep_same_device: whether to move the pesq value to the device of preds - n_processes: integer specifiying the number of processes to run in parallel for the metric calculation. + n_processes: integer specifying the number of processes to run in parallel for the metric calculation. Only applies to batches of data and if ``multiprocessing`` package is installed. Returns: diff --git a/src/torchmetrics/functional/classification/accuracy.py b/src/torchmetrics/functional/classification/accuracy.py index 842395a4f1e..5413604b7c4 100644 --- a/src/torchmetrics/functional/classification/accuracy.py +++ b/src/torchmetrics/functional/classification/accuracy.py @@ -105,7 +105,7 @@ def binary_accuracy( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -189,7 +189,7 @@ def multiclass_accuracy( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -290,14 +290,14 @@ def multilabel_accuracy( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/functional/classification/auroc.py b/src/torchmetrics/functional/classification/auroc.py index 7e431351001..acd94f4050e 100644 --- a/src/torchmetrics/functional/classification/auroc.py +++ b/src/torchmetrics/functional/classification/auroc.py @@ -238,7 +238,7 @@ def multiclass_auroc( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over classes. Should be one of the following: @@ -366,7 +366,7 @@ def multilabel_auroc( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels average: Defines the reduction that is applied over labels. 
Should be one of the following: diff --git a/src/torchmetrics/functional/classification/average_precision.py b/src/torchmetrics/functional/classification/average_precision.py index 20973d6a2a5..93002bb6d2b 100644 --- a/src/torchmetrics/functional/classification/average_precision.py +++ b/src/torchmetrics/functional/classification/average_precision.py @@ -215,7 +215,7 @@ def multiclass_average_precision( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over classes. Should be one of the following: @@ -348,7 +348,7 @@ def multilabel_average_precision( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/functional/classification/calibration_error.py b/src/torchmetrics/functional/classification/calibration_error.py index 49a5793b073..ebb5eecbe0d 100644 --- a/src/torchmetrics/functional/classification/calibration_error.py +++ b/src/torchmetrics/functional/classification/calibration_error.py @@ -287,7 +287,7 @@ def multiclass_calibration_error( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes n_bins: Number of bins to use when computing the metric. norm: Norm used to compare empirical and expected probability bins. ignore_index: diff --git a/src/torchmetrics/functional/classification/cohen_kappa.py b/src/torchmetrics/functional/classification/cohen_kappa.py index 7cf63a24e7e..bd103790049 100644 --- a/src/torchmetrics/functional/classification/cohen_kappa.py +++ b/src/torchmetrics/functional/classification/cohen_kappa.py @@ -93,7 +93,7 @@ class labels. Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -186,7 +186,7 @@ class labels. Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes weights: Weighting type to calculate the score. Choose from: - ``None`` or ``'none'``: no weighting diff --git a/src/torchmetrics/functional/classification/confusion_matrix.py b/src/torchmetrics/functional/classification/confusion_matrix.py index 48e7c23af44..c51770ae7d6 100644 --- a/src/torchmetrics/functional/classification/confusion_matrix.py +++ b/src/torchmetrics/functional/classification/confusion_matrix.py @@ -177,7 +177,7 @@ def binary_confusion_matrix( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. 
Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -370,7 +370,7 @@ def multiclass_confusion_matrix( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes normalize: Normalization mode for confusion matrix. Choose from: - ``None`` or ``'none'``: no normalization (default) @@ -551,7 +551,7 @@ def multilabel_confusion_matrix( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` @@ -560,7 +560,7 @@ def multilabel_confusion_matrix( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions normalize: Normalization mode for confusion matrix. Choose from: diff --git a/src/torchmetrics/functional/classification/exact_match.py b/src/torchmetrics/functional/classification/exact_match.py index 8083d4e44f1..9e6f7c49df7 100644 --- a/src/torchmetrics/functional/classification/exact_match.py +++ b/src/torchmetrics/functional/classification/exact_match.py @@ -77,7 +77,7 @@ def multiclass_exact_match( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of labels + num_classes: Integer specifying the number of labels multidim_average: Defines how additionally dimensions ``...`` should be handled. Should be one of the following: @@ -151,14 +151,14 @@ def multilabel_exact_match( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions multidim_average: Defines how additionally dimensions ``...`` should be handled. Should be one of the following: diff --git a/src/torchmetrics/functional/classification/f_beta.py b/src/torchmetrics/functional/classification/f_beta.py index c2d4f6af320..0f0e883266c 100644 --- a/src/torchmetrics/functional/classification/f_beta.py +++ b/src/torchmetrics/functional/classification/f_beta.py @@ -86,7 +86,7 @@ def binary_fbeta_score( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. 
Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -184,7 +184,7 @@ def multiclass_fbeta_score( preds: Tensor with predictions target: Tensor with true labels beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -296,7 +296,7 @@ def multilabel_fbeta_score( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` @@ -304,7 +304,7 @@ def multilabel_fbeta_score( preds: Tensor with predictions target: Tensor with true labels beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: @@ -394,7 +394,7 @@ def binary_f1_score( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -478,7 +478,7 @@ def multiclass_f1_score( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -578,14 +578,14 @@ def multilabel_f1_score( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. 
Should be one of the following: diff --git a/src/torchmetrics/functional/classification/group_fairness.py b/src/torchmetrics/functional/classification/group_fairness.py index fc4259adf0b..a0c2b967934 100644 --- a/src/torchmetrics/functional/classification/group_fairness.py +++ b/src/torchmetrics/functional/classification/group_fairness.py @@ -41,7 +41,7 @@ def _groups_validation(groups: torch.Tensor, num_groups: int) -> None: f"number of groups {num_groups}. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.", ) if groups.dtype != torch.long: - raise ValueError(f"Excpected dtype of argument groups to be long, not {groups.dtype}.") + raise ValueError(f"Expected dtype of argument groups to be long, not {groups.dtype}.") def _groups_format(groups: torch.Tensor) -> torch.Tensor: @@ -118,7 +118,7 @@ def binary_groups_stat_rates( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)``. - ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``. @@ -195,7 +195,7 @@ def demographic_parity( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``. - ``target`` (int tensor): ``(N, ...)``. @@ -277,7 +277,7 @@ def equal_opportunity( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)``. - ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``. diff --git a/src/torchmetrics/functional/classification/hamming.py b/src/torchmetrics/functional/classification/hamming.py index 34cef0b4d8b..ed47ce56982 100644 --- a/src/torchmetrics/functional/classification/hamming.py +++ b/src/torchmetrics/functional/classification/hamming.py @@ -103,7 +103,7 @@ def binary_hamming_distance( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. 
- ``target`` (int tensor): ``(N, ...)`` @@ -188,7 +188,7 @@ def multiclass_hamming_distance( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -290,14 +290,14 @@ def multilabel_hamming_distance( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/functional/classification/hinge.py b/src/torchmetrics/functional/classification/hinge.py index ca205ed6434..c3fe40be105 100644 --- a/src/torchmetrics/functional/classification/hinge.py +++ b/src/torchmetrics/functional/classification/hinge.py @@ -209,7 +209,7 @@ def multiclass_hinge_loss( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes squared: If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss. multiclass_mode: diff --git a/src/torchmetrics/functional/classification/jaccard.py b/src/torchmetrics/functional/classification/jaccard.py index cd7eef941ff..7e928525ad8 100644 --- a/src/torchmetrics/functional/classification/jaccard.py +++ b/src/torchmetrics/functional/classification/jaccard.py @@ -112,7 +112,7 @@ def binary_jaccard_index( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -191,7 +191,7 @@ def multiclass_jaccard_index( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -265,7 +265,7 @@ def multilabel_jaccard_index( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. 
- ``target`` (int tensor): ``(N, C, ...)`` @@ -274,7 +274,7 @@ def multilabel_jaccard_index( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/functional/classification/matthews_corrcoef.py b/src/torchmetrics/functional/classification/matthews_corrcoef.py index d9aa2fe75a1..544414ee4a8 100644 --- a/src/torchmetrics/functional/classification/matthews_corrcoef.py +++ b/src/torchmetrics/functional/classification/matthews_corrcoef.py @@ -92,7 +92,7 @@ def binary_matthews_corrcoef( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -155,7 +155,7 @@ def multiclass_matthews_corrcoef( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. @@ -204,7 +204,7 @@ def multilabel_matthews_corrcoef( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` @@ -213,7 +213,7 @@ def multilabel_matthews_corrcoef( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation diff --git a/src/torchmetrics/functional/classification/precision_fixed_recall.py b/src/torchmetrics/functional/classification/precision_fixed_recall.py index 8fff734fba7..a708f703dc1 100644 --- a/src/torchmetrics/functional/classification/precision_fixed_recall.py +++ b/src/torchmetrics/functional/classification/precision_fixed_recall.py @@ -168,7 +168,7 @@ def multiclass_precision_at_fixed_recall( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes min_recall: float value specifying minimum recall threshold. 
thresholds: Can be one of: @@ -254,7 +254,7 @@ def multilabel_precision_at_fixed_recall( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels min_recall: float value specifying minimum recall threshold. thresholds: Can be one of: diff --git a/src/torchmetrics/functional/classification/precision_recall.py b/src/torchmetrics/functional/classification/precision_recall.py index 645281526d6..ac94ce35365 100644 --- a/src/torchmetrics/functional/classification/precision_recall.py +++ b/src/torchmetrics/functional/classification/precision_recall.py @@ -75,7 +75,7 @@ def binary_precision( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -158,7 +158,7 @@ def multiclass_precision( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -258,14 +258,14 @@ def multilabel_precision( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: @@ -359,7 +359,7 @@ def binary_recall( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -442,7 +442,7 @@ def multiclass_recall( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -542,14 +542,14 @@ def multilabel_recall( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. 
Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/functional/classification/precision_recall_curve.py b/src/torchmetrics/functional/classification/precision_recall_curve.py index 887a7ba2546..c9b576703fe 100644 --- a/src/torchmetrics/functional/classification/precision_recall_curve.py +++ b/src/torchmetrics/functional/classification/precision_recall_curve.py @@ -40,7 +40,7 @@ def _binary_clf_curve( preds: 1d tensor with predictions target: 1d tensor with true values sample_weights: a 1d tensor with a weight per sample - pos_label: interger determining what the positive class in target tensor is + pos_label: integer determining what the positive class in target tensor is Returns: fps: 1d tensor with false positives for different thresholds @@ -578,7 +578,7 @@ def multiclass_precision_recall_curve( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes thresholds: Can be one of: @@ -812,7 +812,7 @@ def multilabel_precision_recall_curve( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels thresholds: Can be one of: diff --git a/src/torchmetrics/functional/classification/ranking.py b/src/torchmetrics/functional/classification/ranking.py index c49003d418d..d6130979cd2 100644 --- a/src/torchmetrics/functional/classification/ranking.py +++ b/src/torchmetrics/functional/classification/ranking.py @@ -80,7 +80,7 @@ def multilabel_coverage_error( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. @@ -153,7 +153,7 @@ def multilabel_ranking_average_precision( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. @@ -238,7 +238,7 @@ def multilabel_ranking_loss( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation validate_args: bool indicating if input arguments and tensors should be validated for correctness. 
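The `preds` handling that these docstrings repeat (values outside [0, 1] are treated as logits, sigmoid is applied per element, then the result is binarized with `threshold`) can be sketched with the functional `multilabel_recall`; this is an illustrative example, not part of the diff:

```python
import torch
from torchmetrics.functional.classification import multilabel_recall

target = torch.tensor([[1, 0, 1], [0, 1, 1]])
logits = torch.tensor([[2.0, -1.0, 0.5], [-2.0, 3.0, -0.5]])  # values outside [0, 1] -> treated as logits

# Raw logits: sigmoid is applied per element, then thresholded at `threshold`
score_from_logits = multilabel_recall(logits, target, num_labels=3, threshold=0.5)

# Pre-computed probabilities take the thresholding path directly and give the same result
score_from_probs = multilabel_recall(logits.sigmoid(), target, num_labels=3, threshold=0.5)

assert torch.allclose(score_from_logits, score_from_probs)
```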
diff --git a/src/torchmetrics/functional/classification/recall_fixed_precision.py b/src/torchmetrics/functional/classification/recall_fixed_precision.py index 3cac182e519..745a9de2c34 100644 --- a/src/torchmetrics/functional/classification/recall_fixed_precision.py +++ b/src/torchmetrics/functional/classification/recall_fixed_precision.py @@ -235,7 +235,7 @@ def multiclass_recall_at_fixed_precision( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes min_precision: float value specifying minimum precision threshold. thresholds: Can be one of: @@ -348,7 +348,7 @@ def multilabel_recall_at_fixed_precision( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels min_precision: float value specifying minimum precision threshold. thresholds: Can be one of: diff --git a/src/torchmetrics/functional/classification/roc.py b/src/torchmetrics/functional/classification/roc.py index 4bdb33199eb..65d2c16dc87 100644 --- a/src/torchmetrics/functional/classification/roc.py +++ b/src/torchmetrics/functional/classification/roc.py @@ -217,7 +217,7 @@ def multiclass_roc( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes thresholds: Can be one of: @@ -356,7 +356,7 @@ def multilabel_roc( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels thresholds: Can be one of: diff --git a/src/torchmetrics/functional/classification/specificity.py b/src/torchmetrics/functional/classification/specificity.py index 7408973743c..112a7b96204 100644 --- a/src/torchmetrics/functional/classification/specificity.py +++ b/src/torchmetrics/functional/classification/specificity.py @@ -72,7 +72,7 @@ def binary_specificity( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -155,7 +155,7 @@ def multiclass_specificity( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -255,14 +255,14 @@ def multilabel_specificity( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. 
- ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/functional/classification/specificity_sensitivity.py b/src/torchmetrics/functional/classification/specificity_sensitivity.py index 88e21e89775..6851917e1ab 100644 --- a/src/torchmetrics/functional/classification/specificity_sensitivity.py +++ b/src/torchmetrics/functional/classification/specificity_sensitivity.py @@ -101,7 +101,7 @@ def binary_specificity_at_sensitivity( ignore_index: Optional[int] = None, validate_args: bool = True, ) -> Tuple[Tensor, Tensor]: - r"""Compute the higest possible specificity value given the minimum sensitivity levels provided for binary tasks. + r"""Compute the highest possible specificity value given the minimum sensitivity levels provided for binary tasks. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. @@ -211,7 +211,7 @@ def multiclass_specificity_at_sensitivity( ignore_index: Optional[int] = None, validate_args: bool = True, ) -> Tuple[Tensor, Tensor]: - r"""Compute the higest possible specificity value given the minimum sensitivity level provided for multiclass tasks. + r"""Compute the highest possible specificity value given minimum sensitivity level provided for multiclass tasks. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. @@ -235,7 +235,7 @@ def multiclass_specificity_at_sensitivity( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes min_sensitivity: float value specifying minimum sensitivity threshold. thresholds: Can be one of: @@ -328,7 +328,7 @@ def multilabel_specificity_at_sensitivity( ignore_index: Optional[int] = None, validate_args: bool = True, ) -> Tuple[Tensor, Tensor]: - r"""Compute the higest possible specificity value given the minimum sensitivity level provided for multilabel tasks. + r"""Compute the highest possible specificity value given minimum sensitivity level provided for multilabel tasks. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. @@ -352,7 +352,7 @@ def multilabel_specificity_at_sensitivity( Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels min_sensitivity: float value specifying minimum sensitivity threshold. thresholds: Can be one of: @@ -414,7 +414,7 @@ def specicity_at_sensitivity( ignore_index: Optional[int] = None, validate_args: bool = True, ) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: - r"""Compute the higest possible specicity value given the minimum sensitivity thresholds provided. + r"""Compute the highest possible specicity value given the minimum sensitivity thresholds provided. 
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and the find the specificity for a given sensitivity level. diff --git a/src/torchmetrics/functional/classification/stat_scores.py b/src/torchmetrics/functional/classification/stat_scores.py index 8efbfdf2e1e..5153554253b 100644 --- a/src/torchmetrics/functional/classification/stat_scores.py +++ b/src/torchmetrics/functional/classification/stat_scores.py @@ -56,7 +56,7 @@ def _binary_stat_scores_tensor_validation( - tensors have to be of same shape - all values in target tensor that are not ignored have to be in {0, 1} - if pred tensor is not floating point, then all values also have to be in {0, 1} - - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be atleast 2 dimensional + - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 2 dimensional """ # Check that they have same shape @@ -84,7 +84,7 @@ def _binary_stat_scores_tensor_validation( ) if multidim_average != "global" and preds.ndim < 2: - raise ValueError("Expected input to be atleast 2D when multidim_average is set to `samplewise`") + raise ValueError("Expected input to be at least 2D when multidim_average is set to `samplewise`") def _binary_stat_scores_format( @@ -153,7 +153,7 @@ def binary_stat_scores( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, ...)`` @@ -262,7 +262,7 @@ def _multiclass_stat_scores_tensor_validation( - if preds has one more dimension than target, then all dimensions except for preds.shape[1] should match exactly. preds.shape[1] should have size equal to number of classes - if preds and target have same number of dims, then all dimensions should match - - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be atleast 2 dimensional in the + - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 2 dimensional in the int case and 3 dimensional in the float case - all values in target tensor that are not ignored have to be {0, ..., num_classes - 1} - if pred tensor is not floating point, then all values also have to be in {0, ..., num_classes - 1} @@ -284,7 +284,7 @@ def _multiclass_stat_scores_tensor_validation( if multidim_average != "global" and preds.ndim < 3: raise ValueError( "If `preds` have one dimension more than `target`, the shape of `preds` should " - " atleast 3D when multidim_average is set to `samplewise`" + " at least 3D when multidim_average is set to `samplewise`" ) elif preds.ndim == target.ndim: @@ -296,7 +296,7 @@ def _multiclass_stat_scores_tensor_validation( if multidim_average != "global" and preds.ndim < 2: raise ValueError( "When `preds` and `target` have the same shape, the shape of `preds` should " - " atleast 2D when multidim_average is set to `samplewise`" + " at least 2D when multidim_average is set to `samplewise`" ) else: raise ValueError( @@ -357,7 +357,7 @@ def _multiclass_stat_scores_update( - Else we calculate statistics by first calculating the confusion matrix and afterwards deriving the statistics from that - Remove all datapoints that should be ignored. 
Depending on if ``ignore_index`` is in the set of labels - or outside we have do use different augmentation stategies when one hot encoding. + or outside we have do use different augmentation strategies when one hot encoding. """ if multidim_average == "samplewise" or top_k != 1: @@ -472,7 +472,7 @@ def multiclass_stat_scores( Args: preds: Tensor with predictions target: Tensor with true labels - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes average: Defines the reduction that is applied over labels. Should be one of the following: @@ -607,7 +607,7 @@ def _multilabel_stat_scores_tensor_validation( - the second dimension of both tensors need to be equal to the number of labels - all values in target tensor that are not ignored have to be in {0, 1} - if pred tensor is not floating point, then all values also have to be in {0, 1} - - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be atleast 3 dimensional + - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 3 dimensional """ # Check that they have same shape @@ -641,7 +641,7 @@ def _multilabel_stat_scores_tensor_validation( ) if multidim_average != "global" and preds.ndim < 3: - raise ValueError("Expected input to be atleast 3D when multidim_average is set to `samplewise`") + raise ValueError("Expected input to be at least 3D when multidim_average is set to `samplewise`") def _multilabel_stat_scores_format( @@ -725,14 +725,14 @@ def multilabel_stat_scores( Accepts the following input tensors: - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside - [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. - ``target`` (int tensor): ``(N, C, ...)`` Args: preds: Tensor with predictions target: Tensor with true labels - num_labels: Integer specifing the number of labels + num_labels: Integer specifying the number of labels threshold: Threshold for transforming probability to binary (0,1) predictions average: Defines the reduction that is applied over labels. Should be one of the following: diff --git a/src/torchmetrics/functional/detection/panoptic_qualities.py b/src/torchmetrics/functional/detection/panoptic_qualities.py index 1f4f1236976..be34439f883 100644 --- a/src/torchmetrics/functional/detection/panoptic_qualities.py +++ b/src/torchmetrics/functional/detection/panoptic_qualities.py @@ -39,7 +39,7 @@ def panoptic_quality( PQ = \frac{IOU}{TP + 0.5 FP + 0.5 FN} where IOU, TP, FP and FN are respectively the sum of the intersection over union for true positives, the number of - true postitives, false positives and false negatives. This metric is inspired by the PQ implementation of + true positives, false positives and false negatives. This metric is inspired by the PQ implementation of panopticapi, a standard implementation for the PQ metric for object detection. .. 
note: diff --git a/src/torchmetrics/functional/image/lpips.py b/src/torchmetrics/functional/image/lpips.py index 594556d014a..d6480b4eea3 100644 --- a/src/torchmetrics/functional/image/lpips.py +++ b/src/torchmetrics/functional/image/lpips.py @@ -181,23 +181,23 @@ def forward(self, x: Tensor) -> NamedTuple: return vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) -def spatial_average(in_tens: Tensor, keepdim: bool = True) -> Tensor: - """Spatial averaging over heigh and width of images.""" - return in_tens.mean([2, 3], keepdim=keepdim) +def _spatial_average(in_tens: Tensor, keep_dim: bool = True) -> Tensor: + """Spatial averaging over height and width of images.""" + return in_tens.mean([2, 3], keepdim=keep_dim) -def upsam(in_tens: Tensor, out_hw: Tuple[int, ...] = (64, 64)) -> Tensor: +def _upsample(in_tens: Tensor, out_hw: Tuple[int, ...] = (64, 64)) -> Tensor: """Upsample input with bilinear interpolation.""" return nn.Upsample(size=out_hw, mode="bilinear", align_corners=False)(in_tens) -def normalize_tensor(in_feat: Tensor, eps: float = 1e-10) -> Tensor: +def _normalize_tensor(in_feat: Tensor, eps: float = 1e-10) -> Tensor: """Normalize tensors.""" norm_factor = torch.sqrt(torch.sum(in_feat**2, dim=1, keepdim=True)) return in_feat / (norm_factor + eps) -def resize_tensor(x: Tensor, size: int = 64) -> Tensor: +def _resize_tensor(x: Tensor, size: int = 64) -> Tensor: """https://github.com/toshas/torch-fidelity/blob/master/torch_fidelity/sample_similarity_lpips.py#L127C22-L132.""" if x.shape[-1] > size and x.shape[-2] > size: return torch.nn.functional.interpolate(x, (size, size), mode="area") @@ -318,22 +318,22 @@ def forward( # resize input if needed if self.resize is not None: - in0_input = resize_tensor(in0_input, size=self.resize) - in1_input = resize_tensor(in1_input, size=self.resize) + in0_input = _resize_tensor(in0_input, size=self.resize) + in1_input = _resize_tensor(in1_input, size=self.resize) outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input) feats0, feats1, diffs = {}, {}, {} for kk in range(self.L): - feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) + feats0[kk], feats1[kk] = _normalize_tensor(outs0[kk]), _normalize_tensor(outs1[kk]) diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 res = [] for kk in range(self.L): if self.spatial: - res.append(upsam(self.lins[kk](diffs[kk]), out_hw=tuple(in0.shape[2:]))) + res.append(_upsample(self.lins[kk](diffs[kk]), out_hw=tuple(in0.shape[2:]))) else: - res.append(spatial_average(self.lins[kk](diffs[kk]), keepdim=True)) + res.append(_spatial_average(self.lins[kk](diffs[kk]), keep_dim=True)) val: Tensor = sum(res) # type: ignore[assignment] if retperlayer: @@ -378,7 +378,7 @@ def learned_perceptual_image_patch_similarity( reduction: Literal["sum", "mean"] = "mean", normalize: bool = False, ) -> Tensor: - """The Learned Perceptual Image Patch Similarity (`LPIPS_`) calculates the perceptual similarity between two images. + """The Learned Perceptual Image Patch Similarity (`LPIPS_`) calculates perceptual similarity between two images. LPIPS essentially computes the similarity between the activations of two image patches for some pre-defined network. This measure has been shown to match human perception well. 
A low LPIPS score means that image patches are diff --git a/src/torchmetrics/functional/image/ssim.py b/src/torchmetrics/functional/image/ssim.py index 1276e640d68..3f7bc7fcb4b 100644 --- a/src/torchmetrics/functional/image/ssim.py +++ b/src/torchmetrics/functional/image/ssim.py @@ -54,7 +54,7 @@ def _ssim_update( return_full_image: bool = False, return_contrast_sensitivity: bool = False, ) -> Union[Tensor, Tuple[Tensor, Tensor]]: - """Compute Structual Similarity Index Measure. + """Compute Structural Similarity Index Measure. Args: preds: estimated image @@ -68,7 +68,7 @@ def _ssim_update( k1: Parameter of SSIM. k2: Parameter of SSIM. return_full_image: If true, the full ``ssim`` image is returned as a second argument. - Mutually exlusive with ``return_contrast_sensitivity`` + Mutually exclusive with ``return_contrast_sensitivity`` return_contrast_sensitivity: If true, the contrast term is returned as a second argument. The luminance term can be obtained with luminance=ssim/contrast Mutually exclusive with ``return_full_image`` @@ -218,7 +218,7 @@ def structural_similarity_index_measure( return_full_image: bool = False, return_contrast_sensitivity: bool = False, ) -> Union[Tensor, Tuple[Tensor, Tensor]]: - """Compute Structual Similarity Index Measure. + """Compute Structural Similarity Index Measure. Args: preds: estimated image @@ -336,7 +336,7 @@ def _multiscale_ssim_update( ), normalize: Optional[Literal["relu", "simple"]] = None, ) -> Tensor: - """Compute Multi-Scale Structual Similarity Index Measure. + """Compute Multi-Scale Structural Similarity Index Measure. Adapted from: https://github.com/jorge-pessoa/pytorch-msssim/blob/master/pytorch_msssim/__init__.py. @@ -457,9 +457,9 @@ def multiscale_structural_similarity_index_measure( betas: Tuple[float, ...] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), normalize: Optional[Literal["relu", "simple"]] = "relu", ) -> Tensor: - """Compute `MultiScaleSSIM`_, Multi-scale Structual Similarity Index Measure. + """Compute `MultiScaleSSIM`_, Multi-scale Structural Similarity Index Measure. - This metric is a generalization of Structual Similarity Index Measure by incorporating image details at different + This metric is a generalization of Structural Similarity Index Measure by incorporating image details at different resolution scores. Args: diff --git a/src/torchmetrics/functional/multimodal/clip_iqa.py b/src/torchmetrics/functional/multimodal/clip_iqa.py index fcf1f4e7ae7..4f5a6ccbb5f 100644 --- a/src/torchmetrics/functional/multimodal/clip_iqa.py +++ b/src/torchmetrics/functional/multimodal/clip_iqa.py @@ -53,7 +53,7 @@ def _download_clip() -> None: "new": ("New photo.", "Old photo."), "warm": ("Warm photo.", "Cold photo."), "real": ("Real photo.", "Abstract photo."), - "beutiful": ("Beautiful photo.", "Ugly photo."), + "beautiful": ("Beautiful photo.", "Ugly photo."), "lonely": ("Lonely photo.", "Sociable photo."), "relaxing": ("Relaxing photo.", "Stressful photo."), } @@ -91,9 +91,9 @@ def _clip_iqa_format_prompts(prompts: Tuple[Union[str, Tuple[str, str]]] = ("qua Args: prompts: A string, tuple of strings or nested tuple of strings. If a single string is provided, it must be one - of the availble prompts (see above). Else the input is expected to be a tuple, where each element can be one - of two things: either a string or a tuple of strings. If a string is provided, it must be one of the - availble prompts (see above). If tuple is provided, it must be of length 2 and the first string must be a + of the available prompts (see above). 
Else the input is expected to be a tuple, where each element can + be one of two things: either a string or a tuple of strings. If a string is provided, it must be one of the + available prompts (see above). If tuple is provided, it must be of length 2 and the first string must be a positive prompt and the second string must be a negative prompt. Returns: @@ -125,7 +125,7 @@ def _clip_iqa_format_prompts(prompts: Tuple[Union[str, Tuple[str, str]]] = ("qua if isinstance(p, str): if p not in _PROMPTS: raise ValueError( - f"All elements of `prompts` must be one of {_PROMPTS.keys()} if not custom tuple promts, got {p}." + f"All elements of `prompts` must be one of {_PROMPTS.keys()} if not custom tuple prompts, got {p}." ) prompts_names.append(p) prompts_list.extend(_PROMPTS[p]) @@ -230,13 +230,13 @@ def clip_image_quality_assessment( be able to generate a vector representation of the image and the text that is similar if the image and text are semantically similar. - The metric works by calculating the cosine similarity between user provided images and pre-defined promts. The + The metric works by calculating the cosine similarity between user provided images and pre-defined prompts. The prompts always come in pairs of "positive" and "negative" such as "Good photo." and "Bad photo.". By calculating the similartity between image embeddings and both the "positive" and "negative" prompt, the metric can determine which prompt the image is more similar to. The metric then returns the probability that the image is more similar to the first prompt than the second prompt. - Build in promts are: + Build in prompts are: * quality: "Good photo." vs "Bad photo." * brightness: "Bright photo." vs "Dark photo." * noisiness: "Clean photo." vs "Noisy photo." @@ -250,30 +250,30 @@ def clip_image_quality_assessment( * new: "New photo." vs "Old photo." * warm: "Warm photo." vs "Cold photo." * real: "Real photo." vs "Abstract photo." - * beutiful: "Beautiful photo." vs "Ugly photo." + * beautiful: "Beautiful photo." vs "Ugly photo." * lonely: "Lonely photo." vs "Sociable photo." * relaxing: "Relaxing photo." vs "Stressful photo." Args: images: Either a single ``[N, C, H, W]`` tensor or a list of ``[C, H, W]`` tensors model_name_or_path: string indicating the version of the CLIP model to use. By default this argument is set to - ``clip_iqa`` which corresponds to the model used in the original paper. Other availble models are + ``clip_iqa`` which corresponds to the model used in the original paper. Other available models are `"openai/clip-vit-base-patch16"`, `"openai/clip-vit-base-patch32"`, `"openai/clip-vit-large-patch14-336"` and `"openai/clip-vit-large-patch14"` data_range: The maximum value of the input tensor. For example, if the input images are in range [0, 255], data_range should be 255. The images are normalized by this value. prompts: A string, tuple of strings or nested tuple of strings. If a single string is provided, it must be one - of the availble prompts (see above). Else the input is expected to be a tuple, where each element can be one - of two things: either a string or a tuple of strings. If a string is provided, it must be one of the - availble prompts (see above). If tuple is provided, it must be of length 2 and the first string must be a + of the available prompts (see above). Else the input is expected to be a tuple, where each element can + be one of two things: either a string or a tuple of strings. If a string is provided, it must be one of the + available prompts (see above). 
If tuple is provided, it must be of length 2 and the first string must be a positive prompt and the second string must be a negative prompt. .. note:: If using the default `clip_iqa` model, the package `piq` must be installed. Either install with `pip install piq` or `pip install torchmetrics[multimodal]`. Returns: - A tensor of shape ``(N,)`` if a single promts is provided. If a list of promts is provided, a dictionary of - with the promts as keys and tensors of shape ``(N,)`` as values. + A tensor of shape ``(N,)`` if a single prompts is provided. If a list of prompts is provided, a dictionary of + with the prompts as keys and tensors of shape ``(N,)`` as values. Raises: ModuleNotFoundError: @@ -281,14 +281,14 @@ def clip_image_quality_assessment( ValueError: If not all images have format [C, H, W] ValueError: - If promts is a tuple and it is not of length 2 + If prompts is a tuple and it is not of length 2 ValueError: - If promts is a string and it is not one of the available promts + If prompts is a string and it is not one of the available prompts ValueError: - If promts is a list of strings and not all strings are one of the available promts + If prompts is a list of strings and not all strings are one of the available prompts Example:: - Single promt: + Single prompt: >>> from torchmetrics.functional.multimodal import clip_image_quality_assessment >>> import torch @@ -298,7 +298,7 @@ def clip_image_quality_assessment( tensor([0.8894, 0.8902]) Example:: - Multiple promts: + Multiple prompts: >>> from torchmetrics.functional.multimodal import clip_image_quality_assessment >>> import torch @@ -308,7 +308,7 @@ def clip_image_quality_assessment( {'quality': tensor([0.8894, 0.8902]), 'brightness': tensor([0.5507, 0.5208])} Example:: - Custom promts. Must always be a tuple of length 2, with a positive and negative prompt. + Custom prompts. Must always be a tuple of length 2, with a positive and negative prompt. 
>>> from torchmetrics.functional.multimodal import clip_image_quality_assessment >>> import torch diff --git a/src/torchmetrics/functional/nominal/cramers.py b/src/torchmetrics/functional/nominal/cramers.py index 6d5d9d0ca59..46d4058010c 100644 --- a/src/torchmetrics/functional/nominal/cramers.py +++ b/src/torchmetrics/functional/nominal/cramers.py @@ -41,7 +41,7 @@ def _cramers_v_update( Args: preds: 1D or 2D tensor of categorical (nominal) data target: 1D or 2D tensor of categorical (nominal) data - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN`s when ``nan_strategy = 'replace``` diff --git a/src/torchmetrics/functional/nominal/pearson.py b/src/torchmetrics/functional/nominal/pearson.py index 7b4291b6226..b519b105a7a 100644 --- a/src/torchmetrics/functional/nominal/pearson.py +++ b/src/torchmetrics/functional/nominal/pearson.py @@ -39,7 +39,7 @@ def _pearsons_contingency_coefficient_update( Args: preds: 1D or 2D tensor of categorical (nominal) data target: 1D or 2D tensor of categorical (nominal) data - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN`s when ``nan_strategy = 'replace``` diff --git a/src/torchmetrics/functional/nominal/theils_u.py b/src/torchmetrics/functional/nominal/theils_u.py index f9eb3f00baf..a010ee48c1d 100644 --- a/src/torchmetrics/functional/nominal/theils_u.py +++ b/src/torchmetrics/functional/nominal/theils_u.py @@ -64,7 +64,7 @@ def _theils_u_update( Args: preds: 1D or 2D tensor of categorical (nominal) data target: 1D or 2D tensor of categorical (nominal) data - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN`s when ``nan_strategy = 'replace``` diff --git a/src/torchmetrics/functional/nominal/tschuprows.py b/src/torchmetrics/functional/nominal/tschuprows.py index 64a55e4e4bf..1ee8756d243 100644 --- a/src/torchmetrics/functional/nominal/tschuprows.py +++ b/src/torchmetrics/functional/nominal/tschuprows.py @@ -41,7 +41,7 @@ def _tschuprows_t_update( Args: preds: 1D or 2D tensor of categorical (nominal) data target: 1D or 2D tensor of categorical (nominal) data - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN`s when ``nan_strategy = 'replace``` diff --git a/src/torchmetrics/functional/regression/r2.py b/src/torchmetrics/functional/regression/r2.py index 100a94338dc..ec6aec10a12 100644 --- a/src/torchmetrics/functional/regression/r2.py +++ b/src/torchmetrics/functional/regression/r2.py @@ -152,7 +152,7 @@ def r2_score( ValueError: If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors. ValueError: - If ``len(preds)`` is less than ``2`` since at least ``2`` sampels are needed to calculate r2 score. + If ``len(preds)`` is less than ``2`` since at least ``2`` samples are needed to calculate r2 score. ValueError: If ``multioutput`` is not one of ``raw_values``, ``uniform_average`` or ``variance_weighted``. 
ValueError: diff --git a/src/torchmetrics/functional/text/bert.py b/src/torchmetrics/functional/text/bert.py index 3c3236ea195..f375097107e 100644 --- a/src/torchmetrics/functional/text/bert.py +++ b/src/torchmetrics/functional/text/bert.py @@ -268,7 +268,7 @@ def bert_score( system-level evaluation. Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language generation tasks. - This implemenation follows the original implementation from `BERT_score`_. + This implementation follows the original implementation from `BERT_score`_. Args: preds: Either an iterable of predicted sentences or a ``Dict[input_ids, attention_mask]``. diff --git a/src/torchmetrics/functional/text/cer.py b/src/torchmetrics/functional/text/cer.py index bd37df048f4..19dc55afba9 100644 --- a/src/torchmetrics/functional/text/cer.py +++ b/src/torchmetrics/functional/text/cer.py @@ -64,7 +64,7 @@ def _cer_compute(errors: Tensor, total: Tensor) -> Tensor: def char_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor: - """Compute Character Rrror Rate used for performance of an automatic speech recognition system. + """Compute Character Error Rate used for performance of an automatic speech recognition system. This value indicates the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. diff --git a/src/torchmetrics/functional/text/chrf.py b/src/torchmetrics/functional/text/chrf.py index 99aca4e08ff..85d62f58a83 100644 --- a/src/torchmetrics/functional/text/chrf.py +++ b/src/torchmetrics/functional/text/chrf.py @@ -50,7 +50,7 @@ def _prepare_n_grams_dicts( ) -> Tuple[ Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor] ]: - """Prepare dictionaries with default zero values for total ref, hypothesis and matching chraracter and word n-grams. + """Prepare dictionaries with default zero values for total ref, hypothesis and matching character and word n-grams. Args: n_char_order: A character n-gram order. @@ -546,7 +546,7 @@ def chrf_score( """Calculate `chrF score`_ of machine translated text with one or more references. This implementation supports both chrF score computation introduced in [1] and chrF++ score introduced in - `chrF++ score`_. This implementation follows the implmenetaions from https://github.com/m-popovic/chrF and + `chrF++ score`_. This implementation follows the implementations from https://github.com/m-popovic/chrF and https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/chrf.py. Args: diff --git a/src/torchmetrics/functional/text/eed.py b/src/torchmetrics/functional/text/eed.py index 605cf533424..de5c50fa786 100644 --- a/src/torchmetrics/functional/text/eed.py +++ b/src/torchmetrics/functional/text/eed.py @@ -138,7 +138,7 @@ def _eed_function( """ number_of_visits = [-1] * (len(hyp) + 1) - # row[i] stores cost of cheapest path from (0,0) to (i,l) in CDER aligment grid. + # row[i] stores cost of cheapest path from (0,0) to (i,l) in CDER alignment grid. 
row = [1.0] * (len(hyp) + 1) row[0] = 0.0 # CDER initialisation 0,0 = 0.0, rest 1.0 diff --git a/src/torchmetrics/functional/text/helper.py b/src/torchmetrics/functional/text/helper.py index de591f34633..4fe72fcf635 100644 --- a/src/torchmetrics/functional/text/helper.py +++ b/src/torchmetrics/functional/text/helper.py @@ -54,7 +54,7 @@ class _EditOperations(str, Enum): class _LevenshteinEditDistance: """A convenience class for calculating the Levenshtein edit distance. - Class will cache some intermediate values to hasten the calculation. The implementation follows the implemenation + Class will cache some intermediate values to hasten the calculation. The implementation follows the implementation from https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/lib_ter.py, where the most of this implementation is adapted and copied from. @@ -282,7 +282,7 @@ def _get_empty_row(self, length: int) -> List[Tuple[int, _EditOperations]]: return [(int(self.op_undefined), _EditOperations.OP_UNDEFINED)] * (length + 1) def _get_initial_row(self, length: int) -> List[Tuple[int, _EditOperations]]: - """First row corresponds to insertion operations of the reference, so we do 1 edit operation per reference word. + """First row corresponds to insertion operations of the reference, so 1 edit operation per reference word. Args: length: A length of a tokenized sentence. diff --git a/src/torchmetrics/functional/text/helper_embedding_metric.py b/src/torchmetrics/functional/text/helper_embedding_metric.py index 6891ae4c00e..c675cf363cc 100644 --- a/src/torchmetrics/functional/text/helper_embedding_metric.py +++ b/src/torchmetrics/functional/text/helper_embedding_metric.py @@ -236,10 +236,10 @@ def __len__(self) -> int: return self.num_sentences def _get_tokens_idf(self) -> Dict[int, float]: - """Calculate token inverse document frequences. + """Calculate token inverse document frequencies. Return: - A python dictionary containing inverse document frequences for token ids. + A python dictionary containing inverse document frequencies for token ids. """ token_counter: Counter = Counter() diff --git a/src/torchmetrics/functional/text/rouge.py b/src/torchmetrics/functional/text/rouge.py index eea9902da05..ff04f76cd2c 100644 --- a/src/torchmetrics/functional/text/rouge.py +++ b/src/torchmetrics/functional/text/rouge.py @@ -188,7 +188,7 @@ def _normalize_and_tokenize_text( # If normalizer is none, replace any non-alpha-numeric characters with spaces. text = normalizer(text) if callable(normalizer) else re.sub(r"[^a-z0-9]+", " ", text.lower()) - # If tokenizer is none, spliting by spaces + # If tokenizer is none, splitting by spaces tokens = tokenizer(text) if callable(tokenizer) else re.split(r"\s+", text) if stemmer: @@ -299,7 +299,7 @@ def _rouge_score_update( preds: An iterable of predicted sentences. target: An iterable of iterable of target sentences. rouge_keys_values: List of N-grams/'L'/'Lsum' arguments. - accumulate: Useful incase of multi-reference rouge score. + accumulate: Useful in case of multi-reference rouge score. ``avg`` takes the avg of all references with respect to predictions ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references. Allowed values are ``avg`` and ``best``. @@ -309,7 +309,7 @@ def _rouge_score_update( If this is ``None``, replacing any non-alpha-numeric characters with spaces is default. This function must take a `str` and return a `str`. tokenizer: - A user's own tokenizer function. 
If this is ``None``, spliting by spaces is default + A user's own tokenizer function. If this is ``None``, splitting by spaces is default This function must take a `str` and return `Sequence[str]` Example: @@ -433,7 +433,7 @@ def rouge_score( target: An iterable of iterables of target sentences or an iterable of target sentences or a single target sentence. accumulate: - Useful incase of multi-reference rouge score. + Useful in case of multi-reference rouge score. - ``avg`` takes the avg of all references with respect to predictions - ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references. @@ -442,7 +442,7 @@ def rouge_score( normalizer: A user's own normalizer function. If this is ``None``, replacing any non-alpha-numeric characters with spaces is default. This function must take a ``str`` and return a ``str``. - tokenizer: A user's own tokenizer function. If this is ``None``, spliting by spaces is default + tokenizer: A user's own tokenizer function. If this is ``None``, splitting by spaces is default This function must take a ``str`` and return ``Sequence[str]`` rouge_keys: A list of rouge types to calculate. Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``. diff --git a/src/torchmetrics/functional/text/sacre_bleu.py b/src/torchmetrics/functional/text/sacre_bleu.py index babb63744b9..ff34fb174b4 100644 --- a/src/torchmetrics/functional/text/sacre_bleu.py +++ b/src/torchmetrics/functional/text/sacre_bleu.py @@ -102,7 +102,7 @@ class _SacreBLEUTokenizer: import regex _INT_REGEX = ( - # Separate out punctuations preceeded by a non-digit + # Separate out punctuations preceded by a non-digit (regex.compile(r"(\P{N})(\p{P})"), r"\1 \2 "), # Separate out punctuations followed by a non-digit (regex.compile(r"(\p{P})(\P{N})"), r" \1 \2"), diff --git a/src/torchmetrics/functional/text/ter.py b/src/torchmetrics/functional/text/ter.py index c74678ffe40..f57b014909f 100644 --- a/src/torchmetrics/functional/text/ter.py +++ b/src/torchmetrics/functional/text/ter.py @@ -60,7 +60,7 @@ class _TercomTokenizer: See src/ter/core/Normalizer.java in https://github.com/jhclark/tercom Note that Python doesn't support named Unicode blocks so the mapping for relevant blocks was taken from here: https://unicode-table.com/en/blocks/ - This implementation follows the implemenation from + This implementation follows the implementation from https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/tokenizers/tokenizer_ter.py. """ @@ -542,8 +542,8 @@ def translation_edit_rate( ) -> Union[Tensor, Tuple[Tensor, List[Tensor]]]: """Calculate Translation edit rate (`TER`_) of machine translated text with one or more references. - This implementation follows the implmenetaions from - https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/ter.py. The `sacrebleu` implmenetation is a + This implementation follows the implementations from + https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/ter.py. The `sacrebleu` implementation is a near-exact reimplementation of the Tercom algorithm, produces identical results on all "sane" outputs. 
Args: diff --git a/src/torchmetrics/functional/text/wer.py b/src/torchmetrics/functional/text/wer.py index 4465651d1e4..af50d4cb289 100644 --- a/src/torchmetrics/functional/text/wer.py +++ b/src/torchmetrics/functional/text/wer.py @@ -64,7 +64,7 @@ def _wer_compute(errors: Tensor, total: Tensor) -> Tensor: def word_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor: - """Word error rate (WordErrorRate_) is a common metric of the performance of an automatic speech recognition system. + """Word error rate (WordErrorRate_) is a common metric of performance of an automatic speech recognition system. This value indicates the percentage of words that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. diff --git a/src/torchmetrics/functional/text/wip.py b/src/torchmetrics/functional/text/wip.py index e991367333a..2c77139009b 100644 --- a/src/torchmetrics/functional/text/wip.py +++ b/src/torchmetrics/functional/text/wip.py @@ -54,7 +54,7 @@ def _wip_update( def _wip_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) -> Tensor: - """Compute the Word Information Perserved. + """Compute the Word Information Preserved. Args: errors: Number of edit operations to get from the reference to the prediction, summed over all samples @@ -62,7 +62,7 @@ def _wip_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) -> T preds_total: Number of words overall prediction Returns: - Word Information Perserved score + Word Information Preserved score """ return (errors / target_total) * (errors / preds_total) diff --git a/src/torchmetrics/image/fid.py b/src/torchmetrics/image/fid.py index afa706b3425..775cc448fd0 100644 --- a/src/torchmetrics/image/fid.py +++ b/src/torchmetrics/image/fid.py @@ -47,7 +47,7 @@ class _FeatureExtractorInceptionV3(Module): # type: ignore[no-redef] class NoTrainInceptionV3(_FeatureExtractorInceptionV3): - """Module that nevers leaves evaluation mode.""" + """Module that never leaves evaluation mode.""" def __init__( self, diff --git a/src/torchmetrics/image/kid.py b/src/torchmetrics/image/kid.py index b017fd28038..cafa1464b6e 100644 --- a/src/torchmetrics/image/kid.py +++ b/src/torchmetrics/image/kid.py @@ -135,7 +135,7 @@ class KernelInceptionDistance(Metric): ValueError: If ``degree`` is not an integer larger than 0 ValueError: - If ``gamma`` is niether ``None`` or a float larger than 0 + If ``gamma`` is neither ``None`` or a float larger than 0 ValueError: If ``coef`` is not an float larger than 0 ValueError: @@ -223,7 +223,7 @@ def __init__( self.coef = coef if not isinstance(reset_real_features, bool): - raise ValueError("Arugment `reset_real_features` expected to be a bool") + raise ValueError("Argument `reset_real_features` expected to be a bool") self.reset_real_features = reset_real_features if not isinstance(normalize, bool): diff --git a/src/torchmetrics/image/lpip.py b/src/torchmetrics/image/lpip.py index f209892e2e2..76864275a01 100644 --- a/src/torchmetrics/image/lpip.py +++ b/src/torchmetrics/image/lpip.py @@ -38,7 +38,7 @@ def _download_lpips() -> None: class LearnedPerceptualImagePatchSimilarity(Metric): - """The Learned Perceptual Image Patch Similarity (`LPIPS_`) calculates the perceptual similarity between two images. + """The Learned Perceptual Image Patch Similarity (`LPIPS_`) calculates perceptual similarity between two images. 
LPIPS essentially computes the similarity between the activations of two image patches for some pre-defined network. This measure has been shown to match human perception well. A low LPIPS score means that image patches are diff --git a/src/torchmetrics/image/ssim.py b/src/torchmetrics/image/ssim.py index 1b239373c21..ac0808ea653 100644 --- a/src/torchmetrics/image/ssim.py +++ b/src/torchmetrics/image/ssim.py @@ -263,7 +263,7 @@ class MultiScaleStructuralSimilarityIndexMeasure(Metric): ValueError: If ``kernel_size`` is not an int or a Sequence of ints with size 2 or 3. ValueError: - If ``betas`` is not a tuple of floats with lengt 2. + If ``betas`` is not a tuple of floats with length 2. ValueError: If ``normalize`` is neither `None`, `ReLU` nor `simple`. diff --git a/src/torchmetrics/metric.py b/src/torchmetrics/metric.py index ef3f61a9d6f..d8cea430c48 100644 --- a/src/torchmetrics/metric.py +++ b/src/torchmetrics/metric.py @@ -356,7 +356,7 @@ def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any: _update_count = self._update_count self.reset() - # local syncronization settings + # local synchronization settings self._to_sync = self.dist_sync_on_step self._should_unsync = False _temp_compute_on_cpu = self.compute_on_cpu @@ -556,7 +556,7 @@ def sync_context( """Context manager to synchronize states. This context manager is used in distributed setting and makes sure that the local cache states are restored - after yielding the syncronized state. + after yielding the synchronized state. Args: dist_sync_fn: Function to be used to perform states synchronization @@ -1211,5 +1211,5 @@ def __repr__(self) -> str: return self.__class__.__name__ + _op_metrics def _wrap_compute(self, compute: Callable) -> Callable: - """No wrapping nessesary for compositional metrics.""" + """No wrapping necessary for compositional metrics.""" return compute diff --git a/src/torchmetrics/multimodal/clip_iqa.py b/src/torchmetrics/multimodal/clip_iqa.py index 86310b7892d..dff9e05f2e2 100644 --- a/src/torchmetrics/multimodal/clip_iqa.py +++ b/src/torchmetrics/multimodal/clip_iqa.py @@ -60,13 +60,13 @@ class CLIPImageQualityAssessment(Metric): be able to generate a vector representation of the image and the text that is similar if the image and text are semantically similar. - The metric works by calculating the cosine similarity between user provided images and pre-defined promts. The - promts always comes in pairs of "positive" and "negative" such as "Good photo." and "Bad photo.". By calculating + The metric works by calculating the cosine similarity between user provided images and pre-defined prompts. The + prompts always comes in pairs of "positive" and "negative" such as "Good photo." and "Bad photo.". By calculating the similartity between image embeddings and both the "positive" and "negative" prompt, the metric can determine which prompt the image is more similar to. The metric then returns the probability that the image is more similar to the first prompt than the second prompt. - Build in promts are: + Build in prompts are: * quality: "Good photo." vs "Bad photo." * brightness: "Bright photo." vs "Dark photo." * noisiness: "Clean photo." vs "Noisy photo." @@ -80,7 +80,7 @@ class CLIPImageQualityAssessment(Metric): * new: "New photo." vs "Old photo." * warm: "Warm photo." vs "Cold photo." * real: "Real photo." vs "Abstract photo." - * beutiful: "Beautiful photo." vs "Ugly photo." + * beautiful: "Beautiful photo." vs "Ugly photo." * lonely: "Lonely photo." vs "Sociable photo." 
* relaxing: "Relaxing photo." vs "Stressful photo." @@ -106,9 +106,9 @@ class CLIPImageQualityAssessment(Metric): data_range: The maximum value of the input tensor. For example, if the input images are in range [0, 255], data_range should be 255. The images are normalized by this value. prompts: A string, tuple of strings or nested tuple of strings. If a single string is provided, it must be one - of the availble prompts (see above). Else the input is expected to be a tuple, where each element can be one - of two things: either a string or a tuple of strings. If a string is provided, it must be one of the - availble prompts (see above). If tuple is provided, it must be of length 2 and the first string must be a + of the available prompts (see above). Else the input is expected to be a tuple, where each element can + be one of two things: either a string or a tuple of strings. If a string is provided, it must be one of the + available prompts (see above). If tuple is provided, it must be of length 2 and the first string must be a positive prompt and the second string must be a negative prompt. kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. diff --git a/src/torchmetrics/nominal/cramers.py b/src/torchmetrics/nominal/cramers.py index 1229383fd5c..1481a28a73d 100644 --- a/src/torchmetrics/nominal/cramers.py +++ b/src/torchmetrics/nominal/cramers.py @@ -55,7 +55,7 @@ class CramersV(Metric): - ``cramers_v`` (:class:`~torch.Tensor`): Scalar tensor containing the Cramer's V statistic. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes bias_correction: Indication of whether to use bias correction. nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'`` diff --git a/src/torchmetrics/nominal/pearson.py b/src/torchmetrics/nominal/pearson.py index 551c28928bd..143e9d20411 100644 --- a/src/torchmetrics/nominal/pearson.py +++ b/src/torchmetrics/nominal/pearson.py @@ -60,7 +60,7 @@ class PearsonsContingencyCoefficient(Metric): - ``pearsons_cc`` (:class:`~torch.Tensor`): Scalar tensor containing the Pearsons Contingency Coefficient statistic. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'`` kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. diff --git a/src/torchmetrics/nominal/theils_u.py b/src/torchmetrics/nominal/theils_u.py index bb41cd44ed2..f21a8efa385 100644 --- a/src/torchmetrics/nominal/theils_u.py +++ b/src/torchmetrics/nominal/theils_u.py @@ -52,7 +52,7 @@ class TheilsU(Metric): - ``theils_u`` (:class:`~torch.Tensor`): Scalar tensor containing the Theil's U statistic. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'`` kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
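As a usage sketch for the nominal-association metrics whose `num_classes` docstrings are fixed above: both inputs are integer category ids of the same length. The `num_classes=5`, the seed, and the random data below are assumptions for illustration only.

```python
# Hypothetical example: measuring association between two categorical variables.
import torch
from torchmetrics.nominal import CramersV, PearsonsContingencyCoefficient, TheilsU

_ = torch.manual_seed(42)
preds = torch.randint(0, 5, (100,))   # categorical predictions as class ids
target = torch.randint(0, 5, (100,))  # categorical targets as class ids

for metric in (
    CramersV(num_classes=5),
    PearsonsContingencyCoefficient(num_classes=5),
    TheilsU(num_classes=5),
):
    print(metric.__class__.__name__, metric(preds, target))
```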
diff --git a/src/torchmetrics/nominal/tschuprows.py b/src/torchmetrics/nominal/tschuprows.py index c4dd3674d35..540fd016fd6 100644 --- a/src/torchmetrics/nominal/tschuprows.py +++ b/src/torchmetrics/nominal/tschuprows.py @@ -55,7 +55,7 @@ class TschuprowsT(Metric): - ``tschuprows_t`` (:class:`~torch.Tensor`): Scalar tensor containing the Tschuprow's T statistic. Args: - num_classes: Integer specifing the number of classes + num_classes: Integer specifying the number of classes bias_correction: Indication of whether to use bias correction. nan_strategy: Indication of whether to replace or drop ``NaN`` values nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'`` diff --git a/src/torchmetrics/regression/mape.py b/src/torchmetrics/regression/mape.py index c8756c9de91..216526b40b7 100644 --- a/src/torchmetrics/regression/mape.py +++ b/src/torchmetrics/regression/mape.py @@ -49,7 +49,7 @@ class MeanAbsolutePercentageError(Metric): Note: MAPE output is a non-negative floating point. Best result is ``0.0`` . But it is important to note that, - bad predictions, can lead to arbitarily large values. Especially when some ``target`` values are close to 0. + bad predictions, can lead to arbitrarily large values. Especially when some ``target`` values are close to 0. This `MAPE implementation returns`_ a very large number instead of ``inf``. Example: diff --git a/src/torchmetrics/retrieval/_deprecated.py b/src/torchmetrics/retrieval/_deprecated.py index 5e4e9ca4c11..45bff9431f2 100644 --- a/src/torchmetrics/retrieval/_deprecated.py +++ b/src/torchmetrics/retrieval/_deprecated.py @@ -19,8 +19,8 @@ class _RetrievalFallOut(RetrievalFallOut): >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) >>> target = tensor([False, False, True, False, True, False, True]) - >>> fo = _RetrievalFallOut(top_k=2) - >>> fo(preds, target, indexes=indexes) + >>> rfo = _RetrievalFallOut(top_k=2) + >>> rfo(preds, target, indexes=indexes) tensor(0.5000) """ diff --git a/src/torchmetrics/retrieval/fall_out.py b/src/torchmetrics/retrieval/fall_out.py index 7c4f031e1e6..52a00298665 100644 --- a/src/torchmetrics/retrieval/fall_out.py +++ b/src/torchmetrics/retrieval/fall_out.py @@ -72,8 +72,8 @@ class RetrievalFallOut(RetrievalMetric): >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) >>> target = tensor([False, False, True, False, True, False, True]) - >>> fo = RetrievalFallOut(top_k=2) - >>> fo(preds, target, indexes=indexes) + >>> rfo = RetrievalFallOut(top_k=2) + >>> rfo(preds, target, indexes=indexes) tensor(0.5000) """ @@ -131,7 +131,7 @@ def compute(self) -> Tensor: elif self.empty_target_action == "neg": res.append(tensor(0.0)) else: - # ensure list containt only float tensors + # ensure list contains only float tensors res.append(self._metric(mini_preds, mini_target)) return torch.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds) diff --git a/src/torchmetrics/text/bert.py b/src/torchmetrics/text/bert.py index c420b9c929d..0a5a5fd5b3d 100644 --- a/src/torchmetrics/text/bert.py +++ b/src/torchmetrics/text/bert.py @@ -57,7 +57,7 @@ class BERTScore(Metric): BERT leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with human judgment on sentence-level and system-level evaluation. 
Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for - evaluating different language generation tasks. This implemenation follows the original implementation from + evaluating different language generation tasks. This implementation follows the original implementation from `BERT_score`_. As input to ``forward`` and ``update`` the metric accepts the following input: diff --git a/src/torchmetrics/text/chrf.py b/src/torchmetrics/text/chrf.py index d8f670ff909..3d109cbc6c5 100644 --- a/src/torchmetrics/text/chrf.py +++ b/src/torchmetrics/text/chrf.py @@ -53,7 +53,7 @@ class CHRFScore(Metric): """Calculate `chrf score`_ of machine translated text with one or more references. This implementation supports both ChrF score computation introduced in `chrF score`_ and `chrF++ score`_ introduced - in `chrF++ score`_. This implementation follows the implmenetaions from https://github.com/m-popovic/chrF and + in `chrF++ score`_. This implementation follows the implementations from https://github.com/m-popovic/chrF and https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/chrf.py. As input to ``forward`` and ``update`` the metric accepts the following input: diff --git a/src/torchmetrics/text/rouge.py b/src/torchmetrics/text/rouge.py index e6ce7a5b426..7bce72ed1c3 100644 --- a/src/torchmetrics/text/rouge.py +++ b/src/torchmetrics/text/rouge.py @@ -55,7 +55,7 @@ class ROUGEScore(Metric): If this is ``None``, replacing any non-alpha-numeric characters with spaces is default. This function must take a ``str`` and return a ``str``. tokenizer: - A user's own tokenizer function. If this is ``None``, spliting by spaces is default + A user's own tokenizer function. If this is ``None``, splitting by spaces is default This function must take a ``str`` and return ``Sequence[str]`` accumulate: Useful in case of multi-reference rouge score. diff --git a/src/torchmetrics/utilities/checks.py b/src/torchmetrics/utilities/checks.py index a347845f8ff..c35556aaabb 100644 --- a/src/torchmetrics/utilities/checks.py +++ b/src/torchmetrics/utilities/checks.py @@ -649,7 +649,7 @@ def check_forward_full_state_property( metric_class: metric class object that should be checked init_args: dict containing arguments for initializing the metric class input_args: dict containing arguments to pass to ``forward`` - num_update_to_compare: if we successfully detech that the flag is safe to set to ``False`` + num_update_to_compare: if we successfully detect that the flag is safe to set to ``False`` we will run some speedup test. This arg should be a list of integers for how many steps to compare over. reps: number of repetitions of speedup test @@ -669,7 +669,7 @@ def check_forward_full_state_property( Partial state for 1000 steps took: ... Recommended setting `full_state_update=False` - Example (states in ``update`` are dependend meaning that ``full_state_update=True``): + Example (states in ``update`` are dependent meaning that ``full_state_update=True``): >>> from torchmetrics.classification import MulticlassConfusionMatrix >>> class MyMetric(MulticlassConfusionMatrix): ... def update(self, preds, target): diff --git a/src/torchmetrics/wrappers/abstract.py b/src/torchmetrics/wrappers/abstract.py index c5a2d76b8d5..27bdfc0a9f3 100644 --- a/src/torchmetrics/wrappers/abstract.py +++ b/src/torchmetrics/wrappers/abstract.py @@ -20,10 +20,10 @@ class WrapperMetric(Metric): """Abstract base class for wrapper metrics. 
Wrapper metrics are characterized by them wrapping another metric, and forwarding all calls to the wrapped metric. - This means that all logic regarding syncronization etc. is handled by the wrapped metric, and the wrapper metric + This means that all logic regarding synchronization etc. is handled by the wrapped metric, and the wrapper metric should not do anything in this regard. - This class therefore overwrites all methods that are related to syncronization, and does nothing in them. + This class therefore overwrites all methods that are related to synchronization, and does nothing in them. Additionally, the forward method is not implemented by default as custom logic is required for each wrapper metric. diff --git a/src/torchmetrics/wrappers/bootstrapping.py b/src/torchmetrics/wrappers/bootstrapping.py index dd01a0c5c35..4daec853928 100644 --- a/src/torchmetrics/wrappers/bootstrapping.py +++ b/src/torchmetrics/wrappers/bootstrapping.py @@ -62,7 +62,7 @@ class basically keeps multiple copies of the same base metric in memory and when base_metric: base metric class to wrap num_bootstraps: number of copies to make of the base metric for bootstrapping mean: if ``True`` return the mean of the bootstraps - std: if ``True`` return the standard diviation of the bootstraps + std: if ``True`` return the standard deviation of the bootstraps quantile: if given, returns the quantile of the bootstraps. Can only be used with pytorch version 1.6 or higher raw: if ``True``, return all bootstrapped values sampling_strategy: @@ -117,7 +117,7 @@ def __init__( if sampling_strategy not in allowed_sampling: raise ValueError( f"Expected argument ``sampling_strategy`` to be one of {allowed_sampling}" - f" but recieved {sampling_strategy}" + f" but received {sampling_strategy}" ) self.sampling_strategy = sampling_strategy diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py index 90548309095..3c8d6621bc2 100644 --- a/src/torchmetrics/wrappers/classwise.py +++ b/src/torchmetrics/wrappers/classwise.py @@ -38,7 +38,7 @@ class ClasswiseWrapper(WrapperMetric): postfix: string that is appended to the metric names. Example:: - Basic example where the ouput of a metric is unwrapped into a dictionary with the class index as keys: + Basic example where the output of a metric is unwrapped into a dictionary with the class index as keys: >>> import torch >>> _ = torch.manual_seed(42) diff --git a/src/torchmetrics/wrappers/multitask.py b/src/torchmetrics/wrappers/multitask.py index bab604799f2..5c3eb0d5254 100644 --- a/src/torchmetrics/wrappers/multitask.py +++ b/src/torchmetrics/wrappers/multitask.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# this is just a bypass for this module name collision with build-in one +# this is just a bypass for this module name collision with built-in one from typing import Any, Dict, Optional, Sequence, Union from torch import Tensor, nn @@ -31,7 +31,7 @@ class MultitaskWrapper(WrapperMetric): In multitask learning the different tasks requires different metrics to be evaluated. This wrapper allows for easy evaluation in such cases by supporting multiple predictions and targets through a dictionary. - Note that only metrics where the signature of `update` follows the stardard `preds, target` is supported. 
+ Note that only metrics where the signature of `update` follows the standard `preds, target` is supported. Args: task_metrics: @@ -140,7 +140,7 @@ def compute(self) -> Dict[str, Any]: def forward(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> Dict[str, Any]: """Call underlying forward methods for all tasks and return the result as a dictionary.""" - # This method is overriden because we do not need the complex version defined in Metric, that relies on the + # This method is overridden because we do not need the complex version defined in Metric, that relies on the # value of full_state_update, and that also accumulates the results. Here, all computations are handled by the # underlying metrics, which all have their own value of full_state_update, and which all accumulate the results # by themselves. diff --git a/src/torchmetrics/wrappers/tracker.py b/src/torchmetrics/wrappers/tracker.py index b3a9040d8cc..6809fdb115f 100644 --- a/src/torchmetrics/wrappers/tracker.py +++ b/src/torchmetrics/wrappers/tracker.py @@ -171,7 +171,7 @@ def compute_all(self) -> Any: if isinstance(res[0], list): return torch.stack([torch.stack(r, dim=0) for r in res], 0) return torch.stack(res, dim=0) - except TypeError: # fallback solution to just return as it is if we cannot succesfully stack + except TypeError: # fallback solution to just return as it is if we cannot successfully stack return res def reset(self) -> None: @@ -211,16 +211,16 @@ def best_metric( where each is a dict, with keys corresponding to the different values of th collection and the values of the first dict being the optimal values and the values of the second dict being the optimal step - In addtion the value in all cases may be ``None`` if the underlying metric does have a proper defined way + In addition the value in all cases may be ``None`` if the underlying metric does have a proper defined way of being optimal or in the case where a nested structure of metrics are being tracked. """ res = self.compute_all() if isinstance(res, list): rank_zero_warn( - "Encounted nested structure. You are probably using a metric collection inside a metric collection, or" - " a metric wrapper inside a metric collection, which is not supported by `.best_metric()` method." - "Returning `None` instead. Please consider " + "Encountered nested structure. You are probably using a metric collection inside a metric collection," + " or a metric wrapper inside a metric collection, which is not supported by `.best_metric()` method." + " Returning `None` instead." 
            )
            if return_step:
                return None, None
@@ -266,7 +266,7 @@ def best_metric(
         return value

     def _check_for_increment(self, method: str) -> None:
-        """Check that a metric that can be updated/used for computations has been intialized."""
+        """Check that a metric that can be updated/used for computations has been initialized."""
         if not self._increment_called:
             raise ValueError(f"`{method}` cannot be called before `.increment()` has been called.")
diff --git a/tests/integrations/lightning/boring_model.py b/tests/integrations/lightning/boring_model.py
index 407a7295ac8..4f1409fd7a8 100644
--- a/tests/integrations/lightning/boring_model.py
+++ b/tests/integrations/lightning/boring_model.py
@@ -123,5 +123,5 @@ def val_dataloader(self):
         return torch.utils.data.DataLoader(RandomDataset(32, 64))

     def test_dataloader(self):
-        """Define test dataloader used for testing the mdoel."""
+        """Define test dataloader used for testing the model."""
         return torch.utils.data.DataLoader(RandomDataset(32, 64))
diff --git a/tests/integrations/test_lightning.py b/tests/integrations/test_lightning.py
index 182b4e2243c..df49d6edc0c 100644
--- a/tests/integrations/test_lightning.py
+++ b/tests/integrations/test_lightning.py
@@ -34,7 +34,7 @@


 class DiffMetric(SumMetric):
-    """DiffMetric inheritted from `SumMetric` by overidding its `update` method."""
+    """DiffMetric inherited from `SumMetric` by overriding its `update` method."""

     def update(self, value):
         """Update state."""
@@ -184,7 +184,7 @@ class TestModel(BoringModel):
     def __init__(self) -> None:
         super().__init__()

-        # initiliaze one metric for every combination of `on_step` and `on_epoch` and `forward` and `update`
+        # initialize one metric for every combination of `on_step` and `on_epoch` and `forward` and `update`
         self.metric_update = SumMetric()
         self.metric_update_step = SumMetric()
         self.metric_update_epoch = SumMetric()
diff --git a/tests/unittests/bases/test_aggregation.py b/tests/unittests/bases/test_aggregation.py
index 2702d323564..368d417ebb3 100644
--- a/tests/unittests/bases/test_aggregation.py
+++ b/tests/unittests/bases/test_aggregation.py
@@ -107,10 +107,10 @@ def test_nan_error(value, nan_strategy, metric_class):
     """Test correct errors are raised."""
     metric = metric_class(nan_strategy=nan_strategy)
     if nan_strategy == "error":
-        with pytest.raises(RuntimeError, match="Encounted `nan` values in tensor"):
+        with pytest.raises(RuntimeError, match="Encountered `nan` values in tensor"):
             metric(value.clone())
     elif nan_strategy == "warn":
-        with pytest.warns(UserWarning, match="Encounted `nan` values in tensor"):
+        with pytest.warns(UserWarning, match="Encountered `nan` values in tensor"):
             metric(value.clone())
diff --git a/tests/unittests/bases/test_collections.py b/tests/unittests/bases/test_collections.py
index ebda89de83b..98ae0c97f9c 100644
--- a/tests/unittests/bases/test_collections.py
+++ b/tests/unittests/bases/test_collections.py
@@ -323,7 +323,7 @@ def compute(self):
            _mc_preds,
            _mc_target,
        ),
-        # two metrics from registry froms a compute group
+        # two metrics from registry form a compute group
        (
            [MulticlassPrecision(num_classes=3), MulticlassRecall(num_classes=3)],
            {0: ["MulticlassPrecision", "MulticlassRecall"]},
@@ -647,7 +647,7 @@ def test_double_nested_collections(base_metrics, expected):


 def test_with_custom_prefix_postfix():
-    """Test that metric colection does not clash with custom prefix and postfix in users metrics.
+    """Test that metric collection does not clash with custom prefix and postfix in users metrics.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/2065 diff --git a/tests/unittests/bases/test_ddp.py b/tests/unittests/bases/test_ddp.py index fb16e87edae..3b12f94a6a6 100644 --- a/tests/unittests/bases/test_ddp.py +++ b/tests/unittests/bases/test_ddp.py @@ -258,7 +258,7 @@ def _test_sync_on_compute_list_state(rank, sync_on_compute): @pytest.mark.parametrize("sync_on_compute", [True, False]) @pytest.mark.parametrize("test_func", [_test_sync_on_compute_list_state, _test_sync_on_compute_tensor_state]) def test_sync_on_compute(sync_on_compute, test_func): - """Test that syncronization of states can be enabled and disabled for compute.""" + """Test that synchronization of states can be enabled and disabled for compute.""" pytest.pool.map(partial(test_func, sync_on_compute=sync_on_compute), range(NUM_PROCESSES)) @@ -270,5 +270,5 @@ def _test_sync_with_empty_lists(rank): @pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows") def test_sync_with_empty_lists(): - """Test that syncronization of states can be enabled and disabled for compute.""" + """Test that synchronization of states can be enabled and disabled for compute.""" pytest.pool.map(_test_sync_with_empty_lists, range(NUM_PROCESSES)) diff --git a/tests/unittests/bases/test_hashing.py b/tests/unittests/bases/test_hashing.py index f60246bd475..b8bb28987e7 100644 --- a/tests/unittests/bases/test_hashing.py +++ b/tests/unittests/bases/test_hashing.py @@ -11,7 +11,7 @@ ], ) def test_metric_hashing(metric_cls): - """Tests that hases are different. + """Tests that hashes are different. See the Metric's hash function for details on why this is required. diff --git a/tests/unittests/bases/test_metric.py b/tests/unittests/bases/test_metric.py index 5e6a1c2e979..a7211f2982e 100644 --- a/tests/unittests/bases/test_metric.py +++ b/tests/unittests/bases/test_metric.py @@ -42,7 +42,7 @@ def test_error_on_wrong_input(): with pytest.raises(ValueError, match="Expected keyword argument `dist_sync_fn` to be an callable function.*"): DummyMetric(dist_sync_fn=[2, 3]) - with pytest.raises(ValueError, match="Expected keyword argument `compute_on_cpu` to be an `bool` bu.*"): + with pytest.raises(ValueError, match="Expected keyword argument `compute_on_cpu` to be an `bool` but.*"): DummyMetric(compute_on_cpu=None) with pytest.raises(ValueError, match="Expected keyword argument `sync_on_compute` to be a `bool` but.*"): @@ -59,7 +59,7 @@ def test_error_on_wrong_input(): def test_inherit(): - """Test that metric that inherits can be instanciated.""" + """Test that metric that inherits can be instantiated.""" DummyMetric() @@ -465,7 +465,7 @@ def get_memory_usage(): def test_constant_memory_on_repeat_init(): """Test that when initializing a metric multiple times the memory does not increase. - This only works for metrics with `compute_with_cache=False` as otherwise the cache will keep a refence that python + This only works for metrics with `compute_with_cache=False` as otherwise the cache will keep a reference that python gc will not be able to collect and clean. 
""" diff --git a/tests/unittests/clustering/test_adjusted_mutual_info_score.py b/tests/unittests/clustering/test_adjusted_mutual_info_score.py index e686ca0837d..25963e47c5c 100644 --- a/tests/unittests/clustering/test_adjusted_mutual_info_score.py +++ b/tests/unittests/clustering/test_adjusted_mutual_info_score.py @@ -92,7 +92,7 @@ def test_adjusted_mutual_info_score_functional_raises_invalid_task(average_metho def test_adjusted_mutual_info_score_functional_is_symmetric( average_method, preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target ): - """Check that the metric funtional is symmetric.""" + """Check that the metric functional is symmetric.""" for p, t in zip(preds, target): assert torch.allclose( adjusted_mutual_info_score(p, t, average_method), diff --git a/tests/unittests/clustering/test_adjusted_rand_score.py b/tests/unittests/clustering/test_adjusted_rand_score.py index fcd2939bbe3..40c560aface 100644 --- a/tests/unittests/clustering/test_adjusted_rand_score.py +++ b/tests/unittests/clustering/test_adjusted_rand_score.py @@ -64,6 +64,6 @@ def test_rand_score_functional_raises_invalid_task(): def test_rand_score_functional_is_symmetric( preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target ): - """Check that the metric funtional is symmetric.""" + """Check that the metric functional is symmetric.""" for p, t in zip(preds, target): assert torch.allclose(adjusted_rand_score(p, t), adjusted_rand_score(t, p)) diff --git a/tests/unittests/clustering/test_mutual_info_score.py b/tests/unittests/clustering/test_mutual_info_score.py index 49522d50ce9..fc82829f1f9 100644 --- a/tests/unittests/clustering/test_mutual_info_score.py +++ b/tests/unittests/clustering/test_mutual_info_score.py @@ -76,6 +76,6 @@ def test_mutual_info_score_functional_raises_invalid_task(): def test_mutual_info_score_functional_is_symmetric( preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target ): - """Check that the metric funtional is symmetric.""" + """Check that the metric functional is symmetric.""" for p, t in zip(preds, target): assert torch.allclose(mutual_info_score(p, t), mutual_info_score(t, p)) diff --git a/tests/unittests/clustering/test_normalized_mutual_info_score.py b/tests/unittests/clustering/test_normalized_mutual_info_score.py index 095bc5963d2..ead5c2a102e 100644 --- a/tests/unittests/clustering/test_normalized_mutual_info_score.py +++ b/tests/unittests/clustering/test_normalized_mutual_info_score.py @@ -90,7 +90,7 @@ def test_normalized_mutual_info_score_functional_raises_invalid_task(average_met def test_normalized_mutual_info_score_functional_is_symmetric( average_method, preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target ): - """Check that the metric funtional is symmetric.""" + """Check that the metric functional is symmetric.""" for p, t in zip(preds, target): assert torch.allclose( normalized_mutual_info_score(p, t, average_method), diff --git a/tests/unittests/clustering/test_rand_score.py b/tests/unittests/clustering/test_rand_score.py index 08df4ff5e5e..dc499ce4d8b 100644 --- a/tests/unittests/clustering/test_rand_score.py +++ b/tests/unittests/clustering/test_rand_score.py @@ -67,6 +67,6 @@ def test_rand_score_functional_raises_invalid_task(): def test_rand_score_functional_is_symmetric( preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target ): - """Check that the metric funtional is symmetric.""" + """Check that the metric functional is symmetric.""" for p, 
t in zip(preds, target):
        assert torch.allclose(rand_score(p, t), rand_score(t, p))
diff --git a/tests/unittests/helpers/testers.py b/tests/unittests/helpers/testers.py
index 3740e7bf335..ff315df6ec1 100644
--- a/tests/unittests/helpers/testers.py
+++ b/tests/unittests/helpers/testers.py
@@ -320,9 +320,9 @@ def _assert_dtype_support(
 class MetricTester:
     """Test class for all metrics.

-    Class used for efficiently run alot of parametrized tests in ddp mode. Makes sure that ddp is only setup once and
+    Class used to efficiently run a lot of parametrized tests in DDP mode. Makes sure that DDP is only setup once and
     that pool of processes are used for all tests. All tests should subclass from this and implement a new method called
-    `test_metric_name` where the method `self.run_metric_test` is called inside.
+    ``test_metric_name`` where the method ``self.run_metric_test`` is called inside.

     """
diff --git a/tests/unittests/image/test_perceptual_path_length.py b/tests/unittests/image/test_perceptual_path_length.py
index 1f16a8c71f2..dd08bac4b22 100644
--- a/tests/unittests/image/test_perceptual_path_length.py
+++ b/tests/unittests/image/test_perceptual_path_length.py
@@ -45,7 +45,7 @@ def test_interpolation_methods(interpolation_method):
 @pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch_fidelity")
 @skip_on_running_out_of_memory()
 def test_sim_net():
-    """Check that the similiarity network is the same as the one used in torch_fidelity."""
+    """Check that the similarity network is the same as the one used in torch_fidelity."""
     compare = SampleSimilarityLPIPS("sample_similarity", resize=64)
     simnet = _LPIPS(net="vgg", resize=64)
diff --git a/tests/unittests/image/test_rase.py b/tests/unittests/image/test_rase.py
index 74d7da23b05..4a9b696e3a5 100644
--- a/tests/unittests/image/test_rase.py
+++ b/tests/unittests/image/test_rase.py
@@ -42,7 +42,7 @@ def _sewar_rase(preds, target, window_size):
     """Baseline implementation of metric.

-    This custom implementation is nessesary since sewar only supports single image and aggregation therefore needs
+    This custom implementation is necessary since sewar only supports single image and aggregation therefore needs
     adjustments.
""" diff --git a/tests/unittests/multimodal/test_clip_iqa.py b/tests/unittests/multimodal/test_clip_iqa.py index 9af3d169450..403aef0cd96 100644 --- a/tests/unittests/multimodal/test_clip_iqa.py +++ b/tests/unittests/multimodal/test_clip_iqa.py @@ -166,7 +166,7 @@ def test_other_models(model): ("new",), ("warm",), ("real",), - ("beutiful",), + ("beautiful",), ("lonely",), ("relaxing",), # some random combinations @@ -203,7 +203,7 @@ def test_prompt(prompts): @pytest.mark.skipif(not _PIQ_GREATER_EQUAL_0_8, reason="test requires piq>=0.8") @pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_10, reason="test requires transformers>=4.10") def test_plot_method(): - """Test the plot method of CLIPScore seperately in this file due to the skipping conditions.""" + """Test the plot method of CLIPScore separately in this file due to the skipping conditions.""" metric = CLIPImageQualityAssessment() metric.update(torch.rand(1, 3, 256, 256)) fig, ax = metric.plot() diff --git a/tests/unittests/multimodal/test_clip_score.py b/tests/unittests/multimodal/test_clip_score.py index 3413187905e..2cb65ee5a58 100644 --- a/tests/unittests/multimodal/test_clip_score.py +++ b/tests/unittests/multimodal/test_clip_score.py @@ -120,7 +120,7 @@ def test_error_on_wrong_image_format(self, inputs, model_name_or_path): @skip_on_connection_issues() def test_plot_method(self, inputs, model_name_or_path): - """Test the plot method of CLIPScore seperately in this file due to the skipping conditions.""" + """Test the plot method of CLIPScore separately in this file due to the skipping conditions.""" metric = CLIPScore(model_name_or_path=model_name_or_path) preds, target = inputs metric.update(preds[0], target[0]) diff --git a/tests/unittests/regression/test_rse.py b/tests/unittests/regression/test_rse.py index 5e5ca523dcd..da943c51fbe 100644 --- a/tests/unittests/regression/test_rse.py +++ b/tests/unittests/regression/test_rse.py @@ -108,7 +108,7 @@ def test_rse_differentiability(self, squared, preds, target, ref_metric, num_out metric_args={"squared": squared}, ) - @pytest.mark.xfail(raises=RuntimeError, reason="clamp_min_cpu not implented for `Half`.") + @pytest.mark.xfail(raises=RuntimeError, reason="clamp_min_cpu not implemented for `Half`.") def test_rse_half_cpu(self, squared, preds, target, ref_metric, num_outputs): """Test dtype support of the metric on CPU.""" self.run_precision_test_cpu( diff --git a/tests/unittests/text/helpers.py b/tests/unittests/text/helpers.py index 0a78fd2a367..cebad163413 100644 --- a/tests/unittests/text/helpers.py +++ b/tests/unittests/text/helpers.py @@ -91,7 +91,7 @@ def _class_test( check_scriptable: bool indicating if metric should also be tested if it can be scripted key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output against the ref_metric. - ignore_order: Ignore order of prediction accross processes when DDP is used. + ignore_order: Ignore order of prediction across processes when DDP is used. kwargs_update: Additional keyword arguments that will be passed with preds and targets when running update on the metric. @@ -99,7 +99,7 @@ def _class_test( if not metric_args: metric_args = {} - # Instanciate metric + # Instantiate metric metric = metric_class(dist_sync_on_step=dist_sync_on_step, **metric_args) # check that the metric is scriptable @@ -257,7 +257,7 @@ def _assert_half_support( class TextTester(MetricTester): """Tester class for text. - Class used for efficiently run alot of parametrized tests in ddp mode. 
Makes sure that ddp is only setup once and
+    Class used to efficiently run a lot of parametrized tests in ddp mode. Makes sure that ddp is only setup once and
     that pool of processes are used for all tests. All tests for text metrics should subclass from this and implement
     a new method called `test_metric_name` where the method `self.run_metric_test` is called inside.
@@ -340,7 +340,7 @@ def run_class_metric_test(
             check_scriptable: bool indicating if metric should also be tested if it can be scripted
             key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
                 against the ref_metric.
-            ignore_order: Ignore order of prediction accross processes when DDP is used.
+            ignore_order: Ignore order of prediction across processes when DDP is used.
             kwargs_update: Additional keyword arguments that will be passed with preds and targets when running update
                 on the metric.
diff --git a/tests/unittests/wrappers/test_bootstrapping.py b/tests/unittests/wrappers/test_bootstrapping.py
index 2bbb4da8592..b02b5034c75 100644
--- a/tests/unittests/wrappers/test_bootstrapping.py
+++ b/tests/unittests/wrappers/test_bootstrapping.py
@@ -37,7 +37,7 @@ class TestBootStrapper(BootStrapper):
     """Subclass of Bootstrapper class.

     For testing purpose, we subclass the bootstrapper class so we can get the exact permutation the class is creating.
-    This is nessesary such that the reference we are comparing to returns the exact same result for a given permutation.
+    This is necessary such that the reference we are comparing to returns the exact same result for a given permutation.

     """
@@ -77,7 +77,7 @@ def test_bootstrap_sampler(sampling_strategy):
     assert found_one, "resampling did not work because no samples were sampled twice"

     found_zero = _sample_checker(old_samples, new_samples, operator.ne, 0)
-    assert found_zero, "resampling did not work because all samples were atleast sampled once"
+    assert found_zero, "resampling did not work because all samples were at least sampled once"


 @pytest.mark.parametrize("device", ["cpu", "cuda"])
diff --git a/tests/unittests/wrappers/test_multitask.py b/tests/unittests/wrappers/test_multitask.py
index 90d33dcdb5c..f2d1fd7f6f2 100644
--- a/tests/unittests/wrappers/test_multitask.py
+++ b/tests/unittests/wrappers/test_multitask.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# this is just a bypass for this module name collision with build-in one
+# this is just a bypass for this module name collision with built-in one
 import re

 import pytest