New Metric: Dunn Index #2049

Merged: 26 commits merged into master from 2003-dunn-index on Sep 6, 2023
Changes from 4 commits
Commits (26)
84d93d7
functional and metric class initial commit
matsumotosan Sep 1, 2023
edb4b22
docs initial commit
matsumotosan Sep 1, 2023
38daa94
euclidean functional passing
matsumotosan Sep 3, 2023
5884de7
Merge branch 'master' into 2003-dunn-index
matsumotosan Sep 3, 2023
23d2e27
Apply suggestions from code review
matsumotosan Sep 4, 2023
b2188cb
Create inputs.py for clustering tests (#2045)
matsumotosan Sep 4, 2023
954f12f
euclidean functional passing
matsumotosan Sep 4, 2023
d00b526
fix docstring examples
matsumotosan Sep 4, 2023
b62a8b2
New metric: Calinski Harabasz Score (#2036)
matsumotosan Sep 4, 2023
979b8a6
fix inputs to calinski harabasz
matsumotosan Sep 4, 2023
c1f701a
Merge branch 'master' into 2003-dunn-index
matsumotosan Sep 4, 2023
83f2f91
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Sep 4, 2023
74b9141
Merge branch 'master' into 2003-dunn-index
matsumotosan Sep 5, 2023
79aa596
Merge branch 'master' into 2003-dunn-index
matsumotosan Sep 5, 2023
7813a2d
add to docs
SkafteNicki Sep 6, 2023
9d31052
add plot testing
SkafteNicki Sep 6, 2023
9b31de0
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Sep 6, 2023
90bb263
replace vector_norm with norm
SkafteNicki Sep 6, 2023
d9a2b83
fix doc reference
SkafteNicki Sep 6, 2023
19ae17d
Merge branch 'master' into 2003-dunn-index
SkafteNicki Sep 6, 2023
9b5bd37
Update src/torchmetrics/functional/clustering/utils.py
matsumotosan Sep 6, 2023
655ff74
Update tests/unittests/clustering/test_dunn_index.py
matsumotosan Sep 6, 2023
d43f709
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Sep 6, 2023
c8eb436
switch ord to p. ord is python builtin
matsumotosan Sep 6, 2023
3d05c44
Merge branch 'master' into 2003-dunn-index
mergify[bot] Sep 6, 2023
43f25f2
Merge branch 'master' into 2003-dunn-index
mergify[bot] Sep 6, 2023
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -20,6 +20,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added `NormalizedMutualInfoScore` metric to cluster package ([#2029](https://github.com/Lightning-AI/torchmetrics/pull/2029))


- Added `DunnIndex` metric to cluster package ([#2049](https://github.com/Lightning-AI/torchmetrics/pull/2049))

### Changed

-
21 changes: 21 additions & 0 deletions docs/source/clustering/dunn_index.rst
@@ -0,0 +1,21 @@
.. customcarditem::
:header: Dunn Index
:image: https://pl-flash-data.s3.amazonaws.com/assets/thumbnails/clustering.svg
:tags: Clustering

.. include:: ../links.rst

##########
Dunn Index
##########

Module Interface
________________

.. autoclass:: torchmetrics.clustering.DunnIndex
:exclude-members: update, compute

Functional Interface
____________________

.. autofunction:: torchmetrics.functional.clustering.dunn_index
1 change: 1 addition & 0 deletions docs/source/links.rst
@@ -154,3 +154,4 @@
.. _Normalized Mutual Information Score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html
.. _pycocotools: https://github.com/cocodataset/cocoapi/tree/master/PythonAPI/pycocotools
.. _Rand Score: https://link.springer.com/article/10.1007/BF01908075
.. _Dunn Index: https://en.wikipedia.org/wiki/Dunn_index
2 changes: 2 additions & 0 deletions src/torchmetrics/clustering/__init__.py
@@ -11,11 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.clustering.dunn_index import DunnIndex
from torchmetrics.clustering.mutual_info_score import MutualInfoScore
from torchmetrics.clustering.normalized_mutual_info_score import NormalizedMutualInfoScore
from torchmetrics.clustering.rand_score import RandScore

__all__ = [
"DunnIndex",
"MutualInfoScore",
"NormalizedMutualInfoScore",
"RandScore",
120 changes: 120 additions & 0 deletions src/torchmetrics/clustering/dunn_index.py
@@ -0,0 +1,120 @@
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union

from torch import Tensor

from torchmetrics.functional.clustering.dunn_index import dunn_index
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["DunnIndex.plot"]


class DunnIndex(Metric):
r"""Compute `Dunn Index`_.

.. math::
DI_m = \frac{\min_{1\leq i<j\leq m} \delta(C_i,C_j)}{\max_{1\leq k\leq m} \Delta_k}

Where :math:`C_i` and :math:`C_j` are clusters of samples, :math:`\delta(C_i,C_j)` is the intercluster distance
between :math:`C_i` and :math:`C_j`, and :math:`\Delta_k` is the maximum intracluster distance (diameter) of
cluster :math:`C_k`, for a total of :math:`m` clusters.

As input to ``forward`` and ``update`` the metric accepts the following input:

- ``x`` (:class:`~torch.Tensor`): float tensor with shape ``(N,d)`` with the embedded data, where ``d`` is the dimensionality of the embedding space
- ``labels`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with cluster labels

As output of ``forward`` and ``compute`` the metric returns the following output:

- ``dunn_index`` (:class:`~torch.Tensor`): A tensor with the Dunn Index

Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

Example:
>>> import torch
>>> from torchmetrics.clustering import DunnIndex
>>> x = torch.tensor([[0., 0.], [0.5, 0.], [1., 0.], [0.5, 1.]])
>>> labels = torch.tensor([0, 0, 0, 1])
>>> dunn_index = DunnIndex()
>>> dunn_index(x, labels)
tensor(2.)

"""

is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
x: List[Tensor]
labels: List[Tensor]
contingency: Tensor

def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)

self.add_state("x", default=[], dist_reduce_fx="cat")
self.add_state("labels", default=[], dist_reduce_fx="cat")

def update(self, x: Tensor, labels: Tensor) -> None:
"""Update state with predictions and targets."""
self.x.append(x)
self.labels.append(labels)

def compute(self) -> Tensor:
"""Compute mutual information over state."""
return dunn_index(dim_zero_cat(self.x), dim_zero_cat(self.labels))

def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.

Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis.

Returns:
Figure and Axes object

Raises:
ModuleNotFoundError:
If `matplotlib` is not installed

.. plot::
:scale: 75

>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import DunnIndex
>>> metric = DunnIndex()
>>> metric.update(torch.randn(10, 3), torch.randint(0, 3, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())

.. plot::
:scale: 75

>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import DunnIndex
>>> metric = DunnIndex()
>>> for _ in range(10):
... metric.update(torch.randn(10, 3), torch.randint(0, 3, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())

"""
return self._plot(val, ax)
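
A minimal usage sketch of the module interface added above (not part of the PR; the toy feature vectors and cluster assignments are hypothetical):

import torch
from torchmetrics.clustering import DunnIndex

# hypothetical toy data: four 2-d feature vectors and their cluster assignments
x = torch.tensor([[0.0, 0.0], [0.5, 0.0], [1.0, 0.0], [0.5, 1.0]])
labels = torch.tensor([0, 0, 0, 1])

metric = DunnIndex()
metric.update(x, labels)   # accumulate embeddings and labels
score = metric.compute()   # scalar tensor; larger values indicate compact, well-separated clusters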
2 changes: 2 additions & 0 deletions src/torchmetrics/functional/clustering/__init__.py
@@ -11,11 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.clustering.dunn_index import dunn_index
from torchmetrics.functional.clustering.mutual_info_score import mutual_info_score
from torchmetrics.functional.clustering.normalized_mutual_info_score import normalized_mutual_info_score
from torchmetrics.functional.clustering.rand_score import rand_score

__all__ = [
"dunn_index",
"mutual_info_score",
"normalized_mutual_info_score",
"rand_score",
83 changes: 83 additions & 0 deletions src/torchmetrics/functional/clustering/dunn_index.py
@@ -0,0 +1,83 @@
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import combinations
from typing import Tuple

import torch
from torch import Tensor


def _dunn_index_update(x: Tensor, labels: Tensor, p: float) -> Tuple[Tensor, Tensor]:
"""Update and return variables required to compute the Dunn index.

Args:
x: feature vectors of shape (n_samples, n_features)
labels: cluster labels
p: p-norm (distance metric)

Returns:
intercluster_distance: intercluster distances
max_intracluster_distance: max intracluster distances

"""
unique_labels, inverse_indices = labels.unique(return_inverse=True)
clusters = [x[inverse_indices == label_idx] for label_idx in range(len(unique_labels))]
centroids = [c.mean(dim=0) for c in clusters]

intercluster_distance = torch.linalg.vector_norm(
torch.stack([a - b for a, b in combinations(centroids, 2)], dim=0), ord=p, dim=1
)

max_intracluster_distance = torch.stack(
[torch.linalg.vector_norm(ci - mu, ord=p, dim=1).max() for ci, mu in zip(clusters, centroids)]
)

return intercluster_distance, max_intracluster_distance


def _dunn_index_compute(intercluster_distance: Tensor, max_intracluster_distance: Tensor) -> Tensor:
"""Compute the Dunn index based on updated state.

Args:
intercluster_distance: intercluster distances
max_intracluster_distance: max intracluster distances

Returns:
dunn_index: Dunn index

"""
return intercluster_distance.min() / max_intracluster_distance.max()


def dunn_index(x: Tensor, labels: Tensor, p: float = 2) -> Tensor:
"""Compute the Dunn index.

Args:
x: feature vectors of shape (n_samples, n_features)
labels: cluster labels
p: p-norm used for distance metric

Returns:
dunn_index: Dunn index

Example:
>>> import torch
>>> from torchmetrics.functional.clustering import dunn_index
>>> x = torch.tensor([[0., 0.], [0.5, 0.], [1., 0.], [0.5, 1.]])
>>> labels = torch.tensor([0, 0, 0, 1])
>>> dunn_index(x, labels)
tensor(2.)

"""
pairwise_distance, max_distance = _dunn_index_update(x, labels, p)
return _dunn_index_compute(pairwise_distance, max_distance)
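
A quick sanity-check sketch for the functional interface above (not from the PR; the points are hypothetical and chosen so the result is easy to reason about):

import torch
from torchmetrics.functional.clustering import dunn_index

# two tight, well-separated blobs in 2-d
x = torch.tensor([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1],
                  [5.0, 5.0], [5.1, 5.0], [5.0, 5.1]])
labels = torch.tensor([0, 0, 0, 1, 1, 1])

# the minimum centroid-to-centroid distance is large while the maximum
# point-to-centroid distance is small, so the ratio (the Dunn index) is large
score = dunn_index(x, labels, p=2)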
26 changes: 14 additions & 12 deletions src/torchmetrics/functional/clustering/utils.py
@@ -151,6 +151,18 @@ def calculate_contingency_matrix(
return contingency


def _is_real_discrete_label(x: Tensor) -> bool:
"""Check if tensor of labels is real and discrete.

Args:
x: tensor

"""
if x.ndim != 1:
raise ValueError(f"Expected arguments to be 1-d tensors but got {x.ndim}-d tensors.")
return not (torch.is_floating_point(x) or torch.is_complex(x))


def check_cluster_labels(preds: Tensor, target: Tensor) -> None:
"""Check shape of input tensors and if they are real, discrete tensors.

@@ -160,18 +172,8 @@ def check_cluster_labels(preds: Tensor, target: Tensor) -> None:

"""
_check_same_shape(preds, target)
if preds.ndim != 1:
raise ValueError(f"Expected arguments to be 1d tensors but got {preds.ndim} and {target.ndim}")
if (
torch.is_floating_point(preds)
or torch.is_complex(preds)
or torch.is_floating_point(target)
or torch.is_complex(target)
):
raise ValueError(
f"Expected real, discrete values but received {preds.dtype} for"
f"predictions and {target.dtype} for target labels instead."
)
if not (_is_real_discrete_label(preds) and _is_real_discrete_label(target)):
raise ValueError(f"Expected real, discrete values for x but received {preds.dtype} and {target.dtype}.")


def calcualte_pair_cluster_confusion_matrix(
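
The refactor above keeps the validation behaviour of check_cluster_labels; a small sketch of the expected behaviour, assuming the helpers are importable from torchmetrics.functional.clustering.utils (not part of the PR's tests):

import torch
from torchmetrics.functional.clustering.utils import check_cluster_labels

# integer, 1-d label tensors of equal shape pass silently
check_cluster_labels(torch.tensor([0, 1, 1]), torch.tensor([1, 0, 1]))

# floating-point labels are rejected with a ValueError
try:
    check_cluster_labels(torch.tensor([0.0, 1.0]), torch.tensor([0, 1]))
except ValueError as err:
    print(err)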
26 changes: 26 additions & 0 deletions tests/unittests/clustering/inputs.py
@@ -0,0 +1,26 @@
from collections import namedtuple

import torch
from sklearn.datasets import make_blobs

Input = namedtuple("Input", ["x", "labels"])

NUM_BATCHES = 4
NUM_SAMPLES = 50
NUM_FEATURES = 2
NUM_CLASSES = 3


def _batch_blobs(num_batches, num_samples, num_features, num_classes):
x = []
labels = []

for _ in range(num_batches):
_x, _labels = make_blobs(num_samples, num_features, centers=num_classes)
x.append(torch.tensor(_x))
labels.append(torch.tensor(_labels))

return Input(x=torch.stack(x), labels=torch.stack(labels))


_input_blobs = _batch_blobs(NUM_BATCHES, NUM_SAMPLES, NUM_FEATURES, NUM_CLASSES)
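
A sketch of how the batched blob fixture above might be consumed in a test (hypothetical; the PR's actual test wiring is not shown in this 4-commit view):

from torchmetrics.clustering import DunnIndex

metric = DunnIndex()
for x_batch, label_batch in zip(_input_blobs.x, _input_blobs.labels):
    # each batch has shape (NUM_SAMPLES, NUM_FEATURES) and (NUM_SAMPLES,)
    metric.update(x_batch, label_batch)
score = metric.compute()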