From 750a6ca16b64589c9895c3848c110b4a03ea7d92 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Wed, 23 Aug 2023 19:57:52 +0200
Subject: [PATCH 01/23] refactor: remove outdated code and issue a warning if
 two tensors are on separate devices.

---
 ignite/metrics/ssim.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 03ff1ce1716..5474c039c73 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -1,3 +1,4 @@
+import warnings
 from typing import Callable, Sequence, Union

 import torch
@@ -158,8 +159,13 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             )

         channel = y_pred.size(1)
-        if len(self._kernel.shape) < 4:
-            self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)
+
+        if y_pred.device != self._device:
+            warnings.warn(
+                "The metric's device should be the same than your update tensors. See SSIM() device argument.",
+                RuntimeWarning,
+            )
+        self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)

         y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
         y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")

From 78a4c780661082fdcb480767cdfc47b41992bc37 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 09:33:55 +0200
Subject: [PATCH 02/23] feat: prioritize computation on GPU devices over CPUs

If either the metric device or the update input device is a GPU, this
commit moves the other one to the GPU as well.
---
 ignite/metrics/ssim.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 5474c039c73..cf46fa836b6 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -160,12 +160,20 @@ def update(self, output: Sequence[torch.Tensor]) -> None:

         channel = y_pred.size(1)

+        self._kernel = self._kernel.expand(channel, 1, -1, -1)
+
         if y_pred.device != self._device:
             warnings.warn(
                 "The metric's device should be the same than your update tensors. See SSIM() device argument.",
                 RuntimeWarning,
             )
-        self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)
+
+            if self._device == torch.device("cpu"):
+                self._kernel = self._kernel.to(device=y_pred.device)
+
+            if y_pred == torch.device("cpu"):
+                y_pred = y_pred.to(device=self._device)
+                y = y.to(device=self._device)

         y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
         y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")

From 85eebd572a2e1e44c489befa50ee647ef4efa333 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 11:43:30 +0200
Subject: [PATCH 03/23] fix: use a temp var that will be moved with y_pred

The comparison with self._device was not reliable because the metric can
be created with `torch.device("cuda")`, which is not equal to
`torch.device("cuda:0")`, the device a tensor actually reports after being
created with `torch.device("cuda")`.

This change has a bigger performance hit when self._kernel is not on the
same device as y_pred, because the kernel then needs to be moved onto
y_pred's device every time update() is called.
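A minimal repro of the pitfall (illustrative sketch only, assuming a
CUDA-capable machine; this snippet is not part of the patch below):

    import torch

    t = torch.empty(1, device=torch.device("cuda"))
    print(t.device)                            # prints cuda:0; a tensor always carries a device index
    print(t.device == torch.device("cuda"))    # False: "cuda" != "cuda:0"
    print(t.device == torch.device("cuda:0"))  # True

Comparing against self._kernel.device avoids the problem, since both sides
of the comparison then come from real tensors.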
---
 ignite/metrics/ssim.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index cf46fa836b6..9914062add4 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -161,28 +161,29 @@ def update(self, output: Sequence[torch.Tensor]) -> None:

         channel = y_pred.size(1)

         self._kernel = self._kernel.expand(channel, 1, -1, -1)
+        kernel = self._kernel

-        if y_pred.device != self._device:
+        if y_pred.device != self._kernel.device:
             warnings.warn(
                 "The metric's device should be the same than your update tensors. See SSIM() device argument.",
                 RuntimeWarning,
             )

-            if self._device == torch.device("cpu"):
-                self._kernel = self._kernel.to(device=y_pred.device)
+            if self._kernel.device == torch.device("cpu"):
+                kernel = kernel.to(device=y_pred.device)

-            if y_pred == torch.device("cpu"):
-                y_pred = y_pred.to(device=self._device)
-                y = y.to(device=self._device)
+            if y_pred.device == torch.device("cpu"):
+                y_pred = y_pred.to(device=self._kernel.device)
+                y = y.to(device=self._kernel.device)

         y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
         y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")

-        if y_pred.dtype != self._kernel.dtype:
-            self._kernel = self._kernel.to(dtype=y_pred.dtype)
+        if y_pred.dtype != kernel.dtype:
+            kernel = kernel.to(dtype=y_pred.dtype)

         input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
-        outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=channel)
+        outputs = F.conv2d(torch.cat(input_list), kernel, groups=channel)

         batch_size = y_pred.size(0)
         output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]

From 9125e602d2dfac38e19f5106e54b30f6d9d72b7a Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 11:46:55 +0200
Subject: [PATCH 04/23] test: add a test for metric and y_pred on different
 devices

---
 tests/ignite/metrics/test_ssim.py | 34 +++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/tests/ignite/metrics/test_ssim.py b/tests/ignite/metrics/test_ssim.py
index 45ddccbd1c6..b28d929496c 100644
--- a/tests/ignite/metrics/test_ssim.py
+++ b/tests/ignite/metrics/test_ssim.py
@@ -1,3 +1,5 @@
+import warnings
+
 import numpy as np
 import pytest
 import torch
@@ -102,6 +104,38 @@ def test_ssim(
     assert np.allclose(ignite_ssim, skimg_ssim, atol=precision)


+@pytest.mark.parametrize(
+    "metric_device, y_pred_device",
+    [
+        [torch.device("cpu"), torch.device("cpu")],
+        [torch.device("cpu"), torch.device("cuda")],
+        [torch.device("cuda"), torch.device("cpu")],
+        [torch.device("cuda"), torch.device("cuda")],
+    ],
+)
+def test_ssim_device(available_device, metric_device, y_pred_device):
+    if available_device == "cpu":
+        pytest.skip("This test requires a cuda device.")
+
+    data_range = 1.0
+    sigma = 1.5
+    shape = (12, 5, 256, 256)
+
+    ssim = SSIM(data_range=data_range, sigma=sigma, device=metric_device)
+
+    y_pred = torch.rand(shape, device=y_pred_device)
+    y = y_pred * 0.8
+
+    with pytest.warns() as record_warning:
+        warnings.warn("Avoid pytest DID NOT WARN failure.")
+        ssim.update((y_pred, y))
+
+    if metric_device == y_pred_device:
+        assert len(record_warning) == 1
+    else:
+        assert len(record_warning) == 2
+
+
 def test_ssim_variable_batchsize(available_device):
     # Checks https://github.com/pytorch/ignite/issues/2532
     sigma = 1.5

From a4c2f7cc84d0c7b58e34565ee350bfdfd6e62eed Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 14:20:09 +0200
Subject: [PATCH 05/23] feat: move self._kernel directly and issue a warning
 only when not all y_pred tensors are on the same device

---
 ignite/metrics/ssim.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 9914062add4..6d28dfac79c 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -161,29 +161,27 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
         channel = y_pred.size(1)

         self._kernel = self._kernel.expand(channel, 1, -1, -1)
-        kernel = self._kernel

         if y_pred.device != self._kernel.device:
-            warnings.warn(
-                "The metric's device should be the same than your update tensors. See SSIM() device argument.",
-                RuntimeWarning,
-            )
-
             if self._kernel.device == torch.device("cpu"):
-                kernel = kernel.to(device=y_pred.device)
+                self._kernel = self._kernel.to(device=y_pred.device)

             if y_pred.device == torch.device("cpu"):
+                warnings.warn(
+                    "The metric device or one of the previous update tensor was set on another device than this update tensor, which is on CPU. To avoid having a performance hit, ensure that your metric device and all of your update tensors are on the same device.",
+                    RuntimeWarning,
+                )
                 y_pred = y_pred.to(device=self._kernel.device)
                 y = y.to(device=self._kernel.device)

         y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
         y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")

-        if y_pred.dtype != kernel.dtype:
-            kernel = kernel.to(dtype=y_pred.dtype)
+        if y_pred.dtype != self._kernel.dtype:
+            self._kernel = self._kernel.to(dtype=y_pred.dtype)

         input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
-        outputs = F.conv2d(torch.cat(input_list), kernel, groups=channel)
+        outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=channel)

         batch_size = y_pred.size(0)
         output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]

From 1908fffb696a8e9c292e4c161addba3326122c99 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 14:20:39 +0200
Subject: [PATCH 06/23] feat: adapt test to new behaviour

---
 tests/ignite/metrics/test_ssim.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/tests/ignite/metrics/test_ssim.py b/tests/ignite/metrics/test_ssim.py
index b28d929496c..c51cc850627 100644
--- a/tests/ignite/metrics/test_ssim.py
+++ b/tests/ignite/metrics/test_ssim.py
@@ -126,14 +126,19 @@ def test_ssim_device(available_device, metric_device, y_pred_device):
     y_pred = torch.rand(shape, device=y_pred_device)
     y = y_pred * 0.8

-    with pytest.warns() as record_warning:
-        warnings.warn("Avoid pytest DID NOT WARN failure.")
+    if metric_device == torch.device("cuda") and y_pred_device == torch.device("cpu"):
+        with pytest.warns(RuntimeWarning):
+            ssim.update((y_pred, y))
+    else:
         ssim.update((y_pred, y))

-    if metric_device == y_pred_device:
-        assert len(record_warning) == 1
+    if metric_device == torch.device("cuda") or y_pred_device == torch.device("cuda"):
+        # A tensor will always have the device index set
+        excepted_device = torch.device("cuda:0")
     else:
-        assert len(record_warning) == 2
+        excepted_device = torch.device("cpu")
+
+        assert ssim._kernel.device == excepted_device


 def test_ssim_variable_batchsize(available_device):

From 2547e70a33124c6ea6881f9d5e840f47f9ab11ba Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 14:22:51 +0200
Subject: [PATCH 07/23] feat: keep the accumulation on the same device as
 self._kernel

---
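Note: an illustrative sketch of the intent, not part of the diff below.
Dropping the trailing `.to(self._device)` keeps the running sum wherever
the convolution ran, saving one device transfer (and a GPU sync) per
update; the tensor name and shapes here are hypothetical:

    import torch

    # ssim_idx stands in for the per-image SSIM map computed on the GPU
    ssim_idx = torch.rand(8, 3, 24, 24, device="cuda")
    batch_means = torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64)

    acc = torch.tensor(0.0, dtype=torch.float64, device="cuda")
    acc += batch_means.sum()  # accumulate where the data lives
    # old behaviour: acc_cpu += batch_means.sum().to("cpu")  # copies on every update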
 ignite/metrics/ssim.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 6d28dfac79c..a4f15953380 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -199,7 +199,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
         b2 = sigma_pred_sq + sigma_target_sq + self.c2

         ssim_idx = (a1 * a2) / (b1 * b2)
-        self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)
+        self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum()

         self._num_examples += y.shape[0]

From 326995590532666bcf2a10d2ddceff2a2ef2cb79 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 14:30:25 +0200
Subject: [PATCH 08/23] feat: move accumulation alongside self._kernel

---
 ignite/metrics/ssim.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index a4f15953380..141ac8146aa 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -165,6 +165,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
         if y_pred.device != self._kernel.device:
             if self._kernel.device == torch.device("cpu"):
                 self._kernel = self._kernel.to(device=y_pred.device)
+                self._sum_of_ssim = self._sum_of_ssim.to(device=y_pred.device)

             if y_pred.device == torch.device("cpu"):
                 warnings.warn(

From 04af09014626c2c98bcba4a44c46eabdd697ff63 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Thu, 24 Aug 2023 15:11:53 +0200
Subject: [PATCH 09/23] feat: allow different channel numbers

---
 ignite/metrics/ssim.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 141ac8146aa..1cb9e447953 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -104,6 +104,7 @@ def __init__(
         self.pad_h = (self.kernel_size[0] - 1) // 2
         self.pad_w = (self.kernel_size[1] - 1) // 2
         self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
+        self._expanded_kernel = None

     @reinit__is_reduced
     def reset(self) -> None:
@@ -158,14 +159,11 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
         )

-        channel = y_pred.size(1)
-
-        self._kernel = self._kernel.expand(channel, 1, -1, -1)
-
         if y_pred.device != self._kernel.device:
             if self._kernel.device == torch.device("cpu"):
                 self._kernel = self._kernel.to(device=y_pred.device)
                 self._sum_of_ssim = self._sum_of_ssim.to(device=y_pred.device)
+                self._expanded_kernel = None

             if y_pred.device == torch.device("cpu"):
                 warnings.warn(
@@ -181,8 +179,12 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
         if y_pred.dtype != self._kernel.dtype:
             self._kernel = self._kernel.to(dtype=y_pred.dtype)

+        channel = y_pred.size(1)
+        if self._expanded_kernel is None or self._expanded_kernel.shape[0] != channel:
+            self._expanded_kernel = self._kernel.expand(channel, 1, -1, -1)
+
         input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
-        outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=channel)
+        outputs = F.conv2d(torch.cat(input_list), self._expanded_kernel, groups=channel)

         batch_size = y_pred.size(0)
         output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]

From 7922ec9e6719ba3901d3d5ec8fcdb8998a298814 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 09:19:03 +0200
Subject: [PATCH 10/23] style: format using the run_code_style script

---
 tests/ignite/metrics/test_ssim.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/tests/ignite/metrics/test_ssim.py b/tests/ignite/metrics/test_ssim.py
index c51cc850627..5802d12a2d8 100644
--- a/tests/ignite/metrics/test_ssim.py
+++ b/tests/ignite/metrics/test_ssim.py
@@ -1,5 +1,3 @@
-import warnings
-
 import numpy as np
 import pytest
 import torch
@@ -138,7 +136,7 @@ def test_ssim_device(available_device, metric_device, y_pred_device):
     else:
         excepted_device = torch.device("cpu")

-        assert ssim._kernel.device == excepted_device
+    assert ssim._kernel.device == excepted_device


 def test_ssim_variable_batchsize(available_device):

From b0625e481a7bb00999676ed5d8e5b527f6ac2cff Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 09:26:32 +0200
Subject: [PATCH 11/23] style: add line break to conform to E501

---
 ignite/metrics/ssim.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 1cb9e447953..8d0c353040b 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -167,7 +167,9 @@ def update(self, output: Sequence[torch.Tensor]) -> None:

             if y_pred.device == torch.device("cpu"):
                 warnings.warn(
-                    "The metric device or one of the previous update tensor was set on another device than this update tensor, which is on CPU. To avoid having a performance hit, ensure that your metric device and all of your update tensors are on the same device.",
+                    "The metric device or one of the previous update tensor was set on another device than this "
+                    "update tensor, which is on CPU. To avoid having a performance hit, ensure that your metric "
+                    "device and all of your update tensors are on the same device.",
                     RuntimeWarning,
                 )
                 y_pred = y_pred.to(device=self._kernel.device)

From 6817316b2e8fa20fe6f13284bd41394a8a6e6f04 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 09:54:00 +0200
Subject: [PATCH 12/23] fix: use torch.empty to avoid type incompatibility
 between None and Tensor with mypy

---
 ignite/metrics/ssim.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 8d0c353040b..01de4a39c5c 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -104,7 +104,7 @@ def __init__(
         self.pad_h = (self.kernel_size[0] - 1) // 2
         self.pad_w = (self.kernel_size[1] - 1) // 2
         self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
-        self._expanded_kernel = None
+        self._expanded_kernel = torch.empty(0)

     @reinit__is_reduced
     def reset(self) -> None:
@@ -163,7 +163,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             if self._kernel.device == torch.device("cpu"):
                 self._kernel = self._kernel.to(device=y_pred.device)
                 self._sum_of_ssim = self._sum_of_ssim.to(device=y_pred.device)
-                self._expanded_kernel = None
+                self._expanded_kernel = torch.empty(0)

             if y_pred.device == torch.device("cpu"):
                 warnings.warn(
@@ -182,7 +182,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             self._kernel = self._kernel.to(dtype=y_pred.dtype)

         channel = y_pred.size(1)
-        if self._expanded_kernel is None or self._expanded_kernel.shape[0] != channel:
+        if not self._expanded_kernel.numel() or self._expanded_kernel.shape[0] != channel:
             self._expanded_kernel = self._kernel.expand(channel, 1, -1, -1)

         input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]

From d2aa8c8fcf856f8197bf49af9f68b5abc35bb532 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 10:51:21 +0200
Subject: [PATCH 13/23] feat: only operate on self._kernel, keep the
 accumulation on the user's selected device

---
 ignite/metrics/ssim.py | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 01de4a39c5c..0123bed9345 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -103,14 +103,15 @@ def __init__(
         self.c2 = (k2 * data_range) ** 2
         self.pad_h = (self.kernel_size[0] - 1) // 2
         self.pad_w = (self.kernel_size[1] - 1) // 2
-        self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
-        self._expanded_kernel = torch.empty(0)
+        self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
+        self._kernel = torch.empty(0)

     @reinit__is_reduced
     def reset(self) -> None:
         self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)
         self._num_examples = 0
-        self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
+        self._kernel = torch.empty(0)
+        self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)

     def _uniform(self, kernel_size: int) -> torch.Tensor:
         max, min = 2.5, -2.5
@@ -159,11 +160,16 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
         )

+        channel = y_pred.size(1)
+        if (
+            not self._kernel.numel()  # when the tensor is still empty from the __init__
+            or self._kernel.shape[0] != channel
+        ):
+            self._kernel = self._kernel_2d.expand(channel, 1, -1, -1)
+
         if y_pred.device != self._kernel.device:
             if self._kernel.device == torch.device("cpu"):
                 self._kernel = self._kernel.to(device=y_pred.device)
-                self._sum_of_ssim = self._sum_of_ssim.to(device=y_pred.device)
-                self._expanded_kernel = torch.empty(0)

             if y_pred.device == torch.device("cpu"):
                 warnings.warn(
@@ -181,12 +187,8 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
         if y_pred.dtype != self._kernel.dtype:
             self._kernel = self._kernel.to(dtype=y_pred.dtype)

-        channel = y_pred.size(1)
-        if not self._expanded_kernel.numel() or self._expanded_kernel.shape[0] != channel:
-            self._expanded_kernel = self._kernel.expand(channel, 1, -1, -1)
-
         input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
-        outputs = F.conv2d(torch.cat(input_list), self._expanded_kernel, groups=channel)
+        outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=channel)

         batch_size = y_pred.size(0)
         output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]
@@ -204,7 +206,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
         b2 = sigma_pred_sq + sigma_target_sq + self.c2

         ssim_idx = (a1 * a2) / (b1 * b2)
-        self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum()
+        self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(device=self._device)

         self._num_examples += y.shape[0]

From c6bf8f8a62faccd2b8d549b00070db71b752b826 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 10:58:38 +0200
Subject: [PATCH 14/23] test: add variable channel test and factor out shared
 code

---
 tests/ignite/metrics/test_ssim.py | 66 ++++++++++++++++++++++++++-----
 1 file changed, 56 insertions(+), 10 deletions(-)

diff --git a/tests/ignite/metrics/test_ssim.py b/tests/ignite/metrics/test_ssim.py
index 5802d12a2d8..1a442c97639 100644
--- a/tests/ignite/metrics/test_ssim.py
+++ b/tests/ignite/metrics/test_ssim.py
@@ -1,3 +1,5 @@
+from typing import Sequence, Union
+
 import numpy as np
 import pytest
 import torch
@@ -70,25 +72,49 @@ def test_invalid_ssim():
     "shape, kernel_size, gaussian, use_sample_covariance",
     [[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]],
 )
-def test_ssim(
-    available_device, shape, kernel_size, gaussian, use_sample_covariance, dtype=torch.float32, precision=7e-5
-):
-    y_pred = torch.rand(shape, device=available_device, dtype=dtype)
+def test_ssim(available_device, shape, kernel_size, gaussian, use_sample_covariance):
+    y_pred = torch.rand(shape, device=available_device)
     y = y_pred * 0.8

+    compare_ssim_ignite_skiimg(
+        y_pred,
+        y,
+        available_device,
+        kernel_size=kernel_size,
+        gaussian=gaussian,
+        use_sample_covariance=use_sample_covariance,
+    )
+
+
+def compare_ssim_ignite_skiimg(
+    y_pred: torch.Tensor,
+    y: torch.Tensor,
+    device: torch.device,
+    precision: float = 2e-5,  # default to float32 expected precision
+    *,
+    skimg_y_pred: Union[np.ndarray, None] = None,
+    skimg_y: Union[np.ndarray, None] = None,
+    data_range: float = 1.0,
+    kernel_size: Union[int, Sequence[int]] = 11,
+    gaussian: bool = True,
+    use_sample_covariance: bool = False,
+):
     sigma = 1.5
-    data_range = 1.0
-    ssim = SSIM(data_range=data_range, sigma=sigma, device=available_device)
+
+    ssim = SSIM(data_range=data_range, sigma=sigma, device=device)
     ssim.update((y_pred, y))
     ignite_ssim = ssim.compute()

     if y_pred.dtype == torch.bfloat16:
         y_pred = y_pred.to(dtype=torch.float16)

-    skimg_pred = y_pred.cpu().numpy()
-    skimg_y = skimg_pred * 0.8
+    if skimg_y_pred is None:
+        skimg_y_pred = y_pred.cpu().numpy()
+    if skimg_y is None:
+        skimg_y = skimg_y_pred * 0.8
+
     skimg_ssim = ski_ssim(
-        skimg_pred,
+        skimg_y_pred,
         skimg_y,
         win_size=kernel_size,
         sigma=sigma,
@@ -165,6 +191,21 @@ def test_ssim_variable_batchsize(available_device):
     assert np.allclose(out, expected)


+def test_ssim_variable_channel(available_device):
+    y_preds = [
+        torch.rand(12, 5, 28, 28, device=available_device),
+        torch.rand(12, 4, 28, 28, device=available_device),
+        torch.rand(12, 7, 28, 28, device=available_device),
+        torch.rand(12, 3, 28, 28, device=available_device),
+        torch.rand(12, 11, 28, 28, device=available_device),
+        torch.rand(12, 6, 28, 28, device=available_device),
+    ]
+    y_true = [v * 0.8 for v in y_preds]
+
+    for y_pred, y in zip(y_preds, y_true):
+        compare_ssim_ignite_skiimg(y_pred, y, available_device)
+
+
 @pytest.mark.parametrize(
     "dtype, precision", [(torch.bfloat16, 2e-3), (torch.float16, 4e-4), (torch.float32, 2e-5), (torch.float64, 2e-5)]
 )
@@ -173,7 +214,12 @@ def test_cuda_ssim_dtypes(available_device, dtype, precision):
     if available_device == "cpu" and dtype in [torch.float16, torch.bfloat16]:
         pytest.skip(reason=f"Unsupported dtype {dtype} on CPU device")

-    test_ssim(available_device, (12, 3, 28, 28), 11, True, False, dtype=dtype, precision=precision)
+    shape = (12, 3, 28, 28)
+
+    y_pred = torch.rand(shape, device=available_device, dtype=dtype)
+    y = y_pred * 0.8
+
+    compare_ssim_ignite_skiimg(y_pred, y, available_device, precision)

From 99c34696ade1a469d8c59b8ea5cdb2eb3e41f329 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 11:03:50 +0200
Subject: [PATCH 15/23] refactor: remove redundant line between init and reset

---
 ignite/metrics/ssim.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 0031a16a0aa..e4063af8613 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -104,7 +104,6 @@ def __init__(
         self.pad_h = (self.kernel_size[0] - 1) // 2
         self.pad_w = (self.kernel_size[1] - 1) // 2
         self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
-        self._kernel = torch.empty(0)

     @reinit__is_reduced
     def reset(self) -> None:

From eba6f68f8f1e75da3783ec08fceb696c0f7dac4a Mon Sep 17 00:00:00 2001
From: Marc Bresson <50196352+MarcBresson@users.noreply.github.com>
Date: Fri, 25 Aug 2023 11:39:08 +0200
Subject: [PATCH 16/23] refactor: use elif for the device comparison and
 replace RuntimeWarning with UserWarning

Co-authored-by: vfdev
---
 ignite/metrics/ssim.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index e4063af8613..963c1b0f11d 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -168,12 +168,11 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             if self._kernel.device == torch.device("cpu"):
                 self._kernel = self._kernel.to(device=y_pred.device)

-            if y_pred.device == torch.device("cpu"):
+            elif y_pred.device == torch.device("cpu"):
                 warnings.warn(
-                    "The metric device or one of the previous update tensor was set on another device than this "
-                    "update tensor, which is on CPU. To avoid having a performance hit, ensure that your metric "
-                    "device and all of your update tensors are on the same device.",
-                    RuntimeWarning,
+                    f"y_pred tensor is on cpu device but previous computation was on another device: {self._kernel.device}. "
+                    "To avoid having a performance hit, please ensure that all y and y_pred tensors "
+                    "are on the same device.",
                 )
                 y_pred = y_pred.to(device=self._kernel.device)
                 y = y.to(device=self._kernel.device)

From 91ae2359cfccc9e5110117fdced2c08a722a44dc Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 11:51:57 +0200
Subject: [PATCH 17/23] refactor: set _kernel in __init__ and manually format
 to pass E501

---
 ignite/metrics/ssim.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 963c1b0f11d..0d394bf36fe 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -104,12 +104,12 @@ def __init__(
         self.pad_h = (self.kernel_size[0] - 1) // 2
         self.pad_w = (self.kernel_size[1] - 1) // 2
         self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
+        self._kernel = torch.empty(0)

     @reinit__is_reduced
     def reset(self) -> None:
         self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)
         self._num_examples = 0
-        self._kernel = torch.empty(0)

     def _uniform(self, kernel_size: int) -> torch.Tensor:
         kernel = torch.zeros(kernel_size)
@@ -170,9 +170,9 @@ def update(self, output: Sequence[torch.Tensor]) -> None:

             elif y_pred.device == torch.device("cpu"):
                 warnings.warn(
-                    f"y_pred tensor is on cpu device but previous computation was on another device: {self._kernel.device}. "
-                    "To avoid having a performance hit, please ensure that all y and y_pred tensors "
-                    "are on the same device.",
+                    "y_pred tensor is on cpu device but previous computation was on another device: "
+                    f"{self._kernel.device}. To avoid having a performance hit, please ensure that all "
+                    "y and y_pred tensors are on the same device.",
                 )
                 y_pred = y_pred.to(device=self._kernel.device)
                 y = y.to(device=self._kernel.device)

From 7284b01671ae6ac2dc97ea393bc25a3b30853cdc Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 11:52:14 +0200
Subject: [PATCH 18/23] test: adapt test to new UserWarning

---
 tests/ignite/metrics/test_ssim.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/ignite/metrics/test_ssim.py b/tests/ignite/metrics/test_ssim.py
index 1a442c97639..5ebd1a56fa2 100644
--- a/tests/ignite/metrics/test_ssim.py
+++ b/tests/ignite/metrics/test_ssim.py
@@ -151,7 +151,7 @@ def test_ssim_device(available_device, metric_device, y_pred_device):
     y = y_pred * 0.8

     if metric_device == torch.device("cuda") and y_pred_device == torch.device("cpu"):
-        with pytest.warns(RuntimeWarning):
+        with pytest.warns(UserWarning):
             ssim.update((y_pred, y))
     else:
         ssim.update((y_pred, y))
@@ -222,6 +222,7 @@ def test_cuda_ssim_dtypes(available_device, dtype, precision):
     compare_ssim_ignite_skiimg(y_pred, y, available_device, precision)


+@pytest.mark.skip
 @pytest.mark.parametrize("metric_device", ["cpu", "process_device"])
 def test_distrib_integration(distributed, metric_device):
     from ignite.engine import Engine
@@ -288,6 +289,7 @@ def update(engine, i):
     assert pytest.approx(res, abs=tol) == true_res


+@pytest.mark.skip
 @pytest.mark.parametrize("metric_device", [torch.device("cpu"), "process_device"])
 def test_distrib_accumulator_device(distributed, metric_device):
     device = idist.device()

From d96255c5c4ba0215d8a281ae8514bc6a304a18c5 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 12:01:40 +0200
Subject: [PATCH 19/23] test: remove skips

---
 tests/ignite/metrics/test_ssim.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tests/ignite/metrics/test_ssim.py b/tests/ignite/metrics/test_ssim.py
index 5ebd1a56fa2..8cef646c6a2 100644
--- a/tests/ignite/metrics/test_ssim.py
+++ b/tests/ignite/metrics/test_ssim.py
@@ -222,7 +222,6 @@ def test_cuda_ssim_dtypes(available_device, dtype, precision):
     compare_ssim_ignite_skiimg(y_pred, y, available_device, precision)


-@pytest.mark.skip
 @pytest.mark.parametrize("metric_device", ["cpu", "process_device"])
 def test_distrib_integration(distributed, metric_device):
     from ignite.engine import Engine
@@ -289,7 +288,6 @@ def update(engine, i):
     assert pytest.approx(res, abs=tol) == true_res


-@pytest.mark.skip
 @pytest.mark.parametrize("metric_device", [torch.device("cpu"), "process_device"])
 def test_distrib_accumulator_device(distributed, metric_device):
     device = idist.device()

From 2807f288adc290ea62914df08944881528fefedf Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 12:02:01 +0200
Subject: [PATCH 20/23] refactor: use None instead of torch.empty

---
 ignite/metrics/ssim.py | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 0d394bf36fe..b5e34b6c8ee 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -1,5 +1,5 @@
 import warnings
-from typing import Callable, Sequence, Union
+from typing import Callable, Sequence, Union, Optional

 import torch
 import torch.nn.functional as F
@@ -104,7 +104,7 @@ def __init__(
         self.pad_h = (self.kernel_size[0] - 1) // 2
         self.pad_w = (self.kernel_size[1] - 1) // 2
         self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
-        self._kernel = torch.empty(0)
+        self._kernel: Optional[torch.Tensor] = None

     @reinit__is_reduced
     def reset(self) -> None:
@@ -158,10 +158,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
         )

         channel = y_pred.size(1)
-        if (
-            not self._kernel.numel()  # when the tensor is still empty from the __init__
-            or self._kernel.shape[0] != channel
-        ):
+        if self._kernel is None or self._kernel.shape[0] != channel:
             self._kernel = self._kernel_2d.expand(channel, 1, -1, -1)

         if y_pred.device != self._kernel.device:

From 526234cf1e4a12adb210ef97ff2fa74050a81112 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 12:09:20 +0200
Subject: [PATCH 21/23] style: reorder imports

---
 ignite/metrics/ssim.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index b5e34b6c8ee..1ad1965c0ec 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -1,5 +1,5 @@
 import warnings
-from typing import Callable, Sequence, Union, Optional
+from typing import Callable, Optional, Sequence, Union

 import torch
 import torch.nn.functional as F

From b6f1a21faae6acf7371476c9ec207e1189fd92d1 Mon Sep 17 00:00:00 2001
From: Marc Bresson
Date: Fri, 25 Aug 2023 12:15:17 +0200
Subject: [PATCH 22/23] refactor: rename channel to nb_channel

---
 ignite/metrics/ssim.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
index 1ad1965c0ec..25757a94f7c 100644
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -157,9 +157,9 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
         )

-        channel = y_pred.size(1)
-        if self._kernel is None or self._kernel.shape[0] != channel:
-            self._kernel = self._kernel_2d.expand(channel, 1, -1, -1)
+        nb_channel = y_pred.size(1)
+        if self._kernel is None or self._kernel.shape[0] != nb_channel:
+            self._kernel = self._kernel_2d.expand(nb_channel, 1, -1, -1)

         if y_pred.device != self._kernel.device:
             if self._kernel.device == torch.device("cpu"):
@@ -181,7 +181,7 @@ def update(self, output: Sequence[torch.Tensor]) -> None:
             self._kernel = self._kernel.to(dtype=y_pred.dtype)

         input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
-        outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=channel)
+        outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=nb_channel)

         batch_size = y_pred.size(0)
         output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]

From 0a38aa5cc947d806756308ba917c2443cbb899c3 Mon Sep 17 00:00:00 2001
From: vfdev
Date: Fri, 25 Aug 2023 13:20:08 +0200
Subject: [PATCH 23/23] Fixed failing test_distrib_accumulator_device

---
 tests/ignite/metrics/test_ssim.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/ignite/metrics/test_ssim.py b/tests/ignite/metrics/test_ssim.py
index 8cef646c6a2..cc43addb0d2 100644
--- a/tests/ignite/metrics/test_ssim.py
+++ b/tests/ignite/metrics/test_ssim.py
@@ -296,7 +296,10 @@ def test_distrib_accumulator_device(distributed, metric_device):

     ssim = SSIM(data_range=1.0, device=metric_device)

-    for dev in [ssim._device, ssim._kernel.device]:
+    assert ssim._kernel is None
+    assert isinstance(ssim._kernel_2d, torch.Tensor)
+
+    for dev in [ssim._device, ssim._kernel_2d.device]:
         assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"

     y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
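Taken together, the series gives SSIM the following device behaviour
(illustrative usage sketch, assuming a CUDA machine; shapes and values are
arbitrary):

    import torch
    from ignite.metrics import SSIM

    metric = SSIM(data_range=1.0, device="cpu")  # _kernel_2d lives on the metric device
    y_pred = torch.rand(4, 3, 64, 64, device="cuda")
    y = y_pred * 0.8

    metric.update((y_pred, y))  # the expanded kernel silently follows y_pred to cuda:0
    print(metric.compute())     # the accumulated sum stayed on the metric device

Only the reverse mismatch (metric state already on a GPU, update tensors on
the CPU) emits the UserWarning, since that is the case with a real
performance cost.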