Skip to content

Commit

Permalink
fix typo in specificity_at_sensitivity (#2199)
Browse files Browse the repository at this point in the history
Co-authored-by: Jirka Borovec <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Nicki Skafte Detlefsen <[email protected]>
  • Loading branch information
4 people authored Nov 29, 2023
1 parent b07b2a5 commit b7f74c2
Show file tree
Hide file tree
Showing 4 changed files with 60 additions and 21 deletions.
5 changes: 4 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Changed minimum supported Pytorch version from 1.8 to 1.10 ([#2145](https://github.com/Lightning-AI/torchmetrics/pull/2145))


- Changed x-/y-axis order for `PrecisionRecallCurve` to be consistent with scikit-learn ([#2183](https://github.com/Lightning-AI/torchmetrics/pull/2183))


- Use arange and repeat for deterministic bincount ([#2184](https://github.com/Lightning-AI/torchmetrics/pull/2184))


Expand All @@ -46,7 +49,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Deprecated `metric._update_called` ([#2141](https://github.com/Lightning-AI/torchmetrics/pull/2141))


- Changed x-/y-axis order for `PrecisionRecallCurve` to be consistent with scikit-learn ([#2183](https://github.com/Lightning-AI/torchmetrics/pull/2183))
- Deprecated `specicity_at_sensitivity` in favour of `specificity_at_sensitivity` ([#2199](https://github.com/Lightning-AI/torchmetrics/pull/2199))


### Removed
Expand Down
31 changes: 14 additions & 17 deletions src/torchmetrics/classification/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,18 +117,6 @@
)

__all__ = [
"BinaryConfusionMatrix",
"ConfusionMatrix",
"MulticlassConfusionMatrix",
"MultilabelConfusionMatrix",
"PrecisionRecallCurve",
"BinaryPrecisionRecallCurve",
"MulticlassPrecisionRecallCurve",
"MultilabelPrecisionRecallCurve",
"BinaryStatScores",
"MulticlassStatScores",
"MultilabelStatScores",
"StatScores",
"Accuracy",
"BinaryAccuracy",
"MulticlassAccuracy",
Expand All @@ -147,6 +135,10 @@
"BinaryCohenKappa",
"CohenKappa",
"MulticlassCohenKappa",
"BinaryConfusionMatrix",
"ConfusionMatrix",
"MulticlassConfusionMatrix",
"MultilabelConfusionMatrix",
"Dice",
"ExactMatch",
"MulticlassExactMatch",
Expand Down Expand Up @@ -184,29 +176,34 @@
"MultilabelRecall",
"Precision",
"Recall",
"BinaryPrecisionRecallCurve",
"MulticlassPrecisionRecallCurve",
"MultilabelPrecisionRecallCurve",
"PrecisionRecallCurve",
"MultilabelCoverageError",
"MultilabelRankingAveragePrecision",
"MultilabelRankingLoss",
"RecallAtFixedPrecision",
"BinaryRecallAtFixedPrecision",
"MulticlassRecallAtFixedPrecision",
"MultilabelRecallAtFixedPrecision",
"ROC",
"BinaryROC",
"MulticlassROC",
"MultilabelROC",
"ROC",
"BinarySpecificity",
"MulticlassSpecificity",
"MultilabelSpecificity",
"Specificity",
"BinarySpecificityAtSensitivity",
"MulticlassSpecificityAtSensitivity",
"MultilabelSpecificityAtSensitivity",
"BinaryPrecisionAtFixedRecall",
"SpecificityAtSensitivity",
"MulticlassPrecisionAtFixedRecall",
"MultilabelPrecisionAtFixedRecall",
"BinaryStatScores",
"MulticlassStatScores",
"MultilabelStatScores",
"StatScores",
"PrecisionAtFixedRecall",
"RecallAtFixedPrecision",
"BinaryPrecisionAtFixedRecall",
"MulticlassPrecisionAtFixedRecall",
"MultilabelPrecisionAtFixedRecall",
Expand Down
6 changes: 4 additions & 2 deletions src/torchmetrics/functional/classification/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@
multiclass_specificity_at_sensitivity,
multilabel_specificity_at_sensitivity,
specicity_at_sensitivity,
specificity_at_sensitivity,
)
from torchmetrics.functional.classification.stat_scores import (
binary_stat_scores,
Expand Down Expand Up @@ -165,8 +166,6 @@
"multilabel_fbeta_score",
"binary_fairness",
"binary_groups_stat_rates",
"demographic_parity",
"equal_opportunity",
"binary_hamming_distance",
"hamming_distance",
"multiclass_hamming_distance",
Expand Down Expand Up @@ -212,11 +211,14 @@
"multiclass_specificity_at_sensitivity",
"multilabel_specificity_at_sensitivity",
"specicity_at_sensitivity",
"specificity_at_sensitivity",
"binary_stat_scores",
"multiclass_stat_scores",
"multilabel_stat_scores",
"stat_scores",
"binary_precision_at_fixed_recall",
"multilabel_precision_at_fixed_recall",
"multiclass_precision_at_fixed_recall",
"demographic_parity",
"equal_opportunity",
]
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List, Optional, Tuple, Union

import torch
Expand Down Expand Up @@ -414,7 +415,43 @@ def specicity_at_sensitivity(
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the highest possible specicity value given the minimum sensitivity thresholds provided.
r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided.
.. warning::
This function was deprecated in v1.3.0 of Torchmetrics and will be removed in v2.0.0.
Use `specificity_at_sensitivity` instead.
"""
warnings.warn(
"This method has will be removed in 2.0.0. Use `specificity_at_sensitivity` instead.",
DeprecationWarning,
stacklevel=1,
)
return specificity_at_sensitivity(
preds=preds,
target=target,
task=task,
min_sensitivity=min_sensitivity,
thresholds=thresholds,
num_classes=num_classes,
num_labels=num_labels,
ignore_index=ignore_index,
validate_args=validate_args,
)


def specificity_at_sensitivity(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and
then find the specificity for a given sensitivity level.
Expand Down

0 comments on commit b7f74c2

Please sign in to comment.