
Update min Pytorch to 1.10 (#2145)
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec <[email protected]>
3 people authored Oct 11, 2023
1 parent 505c16d commit 09bd064
Showing 21 changed files with 32 additions and 110 deletions.
4 changes: 2 additions & 2 deletions .azure/gpu-unittests.yml
@@ -19,9 +19,9 @@ jobs:
matrix:
"PyTorch | old":
# Torch does not have build wheels with old Torch versions for newer CUDA
docker-image: "pytorchlightning/torchmetrics:ubuntu20.04-cuda11.1.1-py3.8-torch1.8.1"
docker-image: "pytorchlightning/torchmetrics:ubuntu22.04-cuda11.8.0-py3.9-torch1.10"
agent-pool: "lit-rtx-3090"
torch-ver: "1.8.1"
torch-ver: "1.10.2"
"PyTorch | 1.X":
docker-image: "pytorchlightning/torchmetrics:ubuntu22.04-cuda11.8.0-py3.9-torch1.13"
agent-pool: "lit-rtx-3090"
6 changes: 1 addition & 5 deletions .github/workflows/ci-tests.yml
@@ -32,7 +32,7 @@ jobs:
matrix:
os: ["ubuntu-20.04"]
python-version: ["3.9"]
pytorch-version: ["1.9.1", "1.10.2", "1.11.0", "1.12.1", "1.13.1", "2.0.1", "2.1.0"]
pytorch-version: ["1.10.2", "1.11.0", "1.12.1", "1.13.1", "2.0.1", "2.1.0"]
include:
- { os: "ubuntu-22.04", python-version: "3.8", pytorch-version: "1.13.1" }
- { os: "ubuntu-22.04", python-version: "3.10", pytorch-version: "1.13.1" }
@@ -47,10 +47,6 @@ jobs:
- { os: "windows-2022", python-version: "3.9", pytorch-version: "1.13.1" }
- { os: "windows-2022", python-version: "3.10", pytorch-version: "2.0.1" }
- { os: "windows-2022", python-version: "3.11", pytorch-version: "2.1.0" }
# the oldest configurations
- { os: "ubuntu-20.04", python-version: "3.8", pytorch-version: "1.8.1", requires: "oldest" }
- { os: "macOS-11", python-version: "3.8", pytorch-version: "1.8.1", requires: "oldest" }
- { os: "windows-2019", python-version: "3.8", pytorch-version: "1.8.1", requires: "oldest" }
env:
PYTORCH_URL: "https://download.pytorch.org/whl/cpu/torch_stable.html"
FREEZE_REQUIREMENTS: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }}
1 change: 0 additions & 1 deletion .github/workflows/docker-build.yml
@@ -66,7 +66,6 @@ jobs:
include:
# These are the base images for PL release docker images,
# so include at least all of the combinations in release-dockers.yml.
- { python: "3.8", pytorch: "1.8.1", cuda: "11.1.1", ubuntu: "20.04" }
- { python: "3.9", pytorch: "1.10", cuda: "11.8.0", ubuntu: "22.04" }
- { python: "3.9", pytorch: "1.11", cuda: "11.8.0", ubuntu: "22.04" }
- { python: "3.9", pytorch: "1.13", cuda: "11.8.0", ubuntu: "22.04" }
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -23,6 +23,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Change default state of `SpectralAngleMapper` and `UniversalImageQualityIndex` to be tensors ([#2089](https://github.com/Lightning-AI/torchmetrics/pull/2089))

- Changed minimum supported Pytorch version from 1.8 to 1.10 ([#2145](https://github.com/Lightning-AI/torchmetrics/pull/2145))


### Removed

3 changes: 2 additions & 1 deletion requirements/base.txt
@@ -3,6 +3,7 @@

numpy >1.20.0
packaging >17.1
torch >=1.8.1, <=2.1.0
torch >=1.10.0, <=2.0.1
torch >=1.10.0, <=2.1.0
typing-extensions; python_version < '3.9'
lightning-utilities >=0.8.0, <0.10.0
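
For context, the tightened pin can be checked against a local environment. The following is an illustrative sketch, not part of this commit; it only relies on the packaging dependency already listed above.

# Sketch: verify the installed torch falls inside the new supported window.
from packaging.version import Version
import torch

installed = Version(torch.__version__.split("+")[0])  # drop local build tags such as "+cu118"
assert Version("1.10.0") <= installed <= Version("2.1.0"), f"torch {installed} is outside the supported range"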
14 changes: 1 addition & 13 deletions src/torchmetrics/functional/image/helper.py
@@ -4,9 +4,6 @@
from torch import Tensor
from torch.nn import functional as F # noqa: N812

from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_10


def _gaussian(kernel_size: int, sigma: float, dtype: torch.dtype, device: Union[torch.device, str]) -> Tensor:
"""Compute 1D gaussian kernel.
@@ -172,13 +169,4 @@ def _reflection_pad_3d(inputs: Tensor, pad_h: int, pad_w: int, pad_d: int) -> Te
padded input tensor
"""
if _TORCH_GREATER_EQUAL_1_10:
inputs = F.pad(inputs, (pad_h, pad_h, pad_w, pad_w, pad_d, pad_d), mode="reflect")
else:
rank_zero_warn(
"An older version of pyTorch is used."
" For optimal speed, please upgrade to at least PyTorch v1.10 or higher."
)
for dim, pad in enumerate([pad_h, pad_w, pad_d]):
inputs = _single_dimension_pad(inputs, dim + 2, pad, outer_pad=1)
return inputs
return F.pad(inputs, (pad_h, pad_h, pad_w, pad_w, pad_d, pad_d), mode="reflect")
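
With 1.10 as the minimum, F.pad handles reflection padding of 5D inputs directly, which is why the _single_dimension_pad fallback loop above could be dropped. A minimal illustrative sketch of the call _reflection_pad_3d now makes (not part of this commit, assumes torch >= 1.10):

import torch
import torch.nn.functional as F

x = torch.arange(2 * 1 * 3 * 4 * 5, dtype=torch.float32).reshape(2, 1, 3, 4, 5)
pad_h, pad_w, pad_d = 1, 1, 1
# the pad tuple is consumed from the last dimension backwards
padded = F.pad(x, (pad_h, pad_h, pad_w, pad_w, pad_d, pad_d), mode="reflect")
print(padded.shape)  # torch.Size([2, 1, 5, 6, 7])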
5 changes: 2 additions & 3 deletions src/torchmetrics/functional/image/perceptual_path_length.py
@@ -18,7 +18,7 @@
from torch import Tensor, nn

from torchmetrics.functional.image.lpips import _LPIPS
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_10, _TORCHVISION_AVAILABLE
from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE

if not _TORCHVISION_AVAILABLE:
__doctest_skip__ = ["perceptual_path_length"]
@@ -246,8 +246,7 @@ def perceptual_path_length(
else:
raise ValueError(f"sim_net must be a nn.Module or one of 'alex', 'vgg', 'squeeze', got {sim_net}")

decorator = torch.inference_mode if _TORCH_GREATER_EQUAL_1_10 else torch.no_grad
with decorator():
with torch.inference_mode():
distances = []
num_batches = math.ceil(num_samples / batch_size)
for batch_idx in range(num_batches):
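Since torch.inference_mode exists from 1.10 onward, the conditional fallback to torch.no_grad is no longer needed. Besides disabling gradient tracking, inference_mode marks produced tensors as inference tensors, which skips version-counter bookkeeping and is typically the faster choice for pure evaluation loops. A short illustrative sketch of the difference (not part of this commit):

import torch

x = torch.randn(4, requires_grad=True)

with torch.no_grad():
    y = x * 2
print(y.requires_grad)   # False

with torch.inference_mode():
    z = x * 2
print(z.requires_grad)   # False
print(z.is_inference())  # True: z cannot re-enter autograd later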
13 changes: 2 additions & 11 deletions src/torchmetrics/image/fid.py
@@ -20,13 +20,11 @@
from torch.nn.functional import adaptive_avg_pool2d

from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE, _TORCH_GREATER_EQUAL_1_9
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

__doctest_skip__ = ["FrechetInceptionDistance.__init__"] if not _TORCH_GREATER_EQUAL_1_9 else []

if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ += ["FrechetInceptionDistance.plot"]
__doctest_skip__ = ["FrechetInceptionDistance.plot"]

if _TORCH_FIDELITY_AVAILABLE:
from torch_fidelity.feature_extractor_inceptionv3 import FeatureExtractorInceptionV3 as _FeatureExtractorInceptionV3
@@ -42,9 +40,6 @@ class _FeatureExtractorInceptionV3(Module): # type: ignore[no-redef]

__doctest_skip__ = ["FrechetInceptionDistance", "FrechetInceptionDistance.plot"]

if not _TORCH_GREATER_EQUAL_1_9:
__doctest_skip__ = ["FrechetInceptionDistance", "FrechetInceptionDistance.plot"]


class NoTrainInceptionV3(_FeatureExtractorInceptionV3):
"""Module that never leaves evaluation mode."""
@@ -285,10 +280,6 @@ def __init__(
**kwargs: Any,
) -> None:
super().__init__(**kwargs)

if not _TORCH_GREATER_EQUAL_1_9:
raise ValueError("FrechetInceptionDistance metric requires that PyTorch is version 1.9.0 or higher.")

if isinstance(feature, int):
num_features = feature
if not _TORCH_FIDELITY_AVAILABLE:
13 changes: 1 addition & 12 deletions src/torchmetrics/image/mifid.py
@@ -20,7 +20,7 @@
from torchmetrics.image.fid import NoTrainInceptionV3, _compute_fid
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE, _TORCH_GREATER_EQUAL_1_10
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

__doctest_requires__ = {
@@ -29,12 +29,6 @@
]
}

if not _TORCH_GREATER_EQUAL_1_10:
__doctest_skip__ = [
"MemorizationInformedFrechetInceptionDistance",
"MemorizationInformedFrechetInceptionDistance.plot",
]

if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MemorizationInformedFrechetInceptionDistance.plot"]

@@ -166,11 +160,6 @@ def __init__(
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _TORCH_GREATER_EQUAL_1_10:
raise RuntimeError(
"MemorizationInformedFrechetInceptionDistance metric requires PyTorch version greater or equal to 1.10"
)

if isinstance(feature, int):
if not _TORCH_FIDELITY_AVAILABLE:
raise ModuleNotFoundError(
2 changes: 1 addition & 1 deletion src/torchmetrics/utilities/checks.py
@@ -656,7 +656,7 @@ def check_forward_full_state_property(
Example (states in ``update`` are independent, save to set ``full_state_update=False``)
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> check_forward_full_state_property( # doctest: +ELLIPSIS
>>> check_forward_full_state_property( # doctest: +SKIP
... MulticlassConfusionMatrix,
... init_args = {'num_classes': 3},
... input_args = {'preds': torch.randint(3, (100,)), 'target': torch.randint(3, (100,))},
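The docstring example now carries a doctest +SKIP directive instead of +ELLIPSIS, presumably because its benchmark-style output is not deterministic enough to match. For reference, an illustrative sketch of calling the helper outside of doctest, mirroring the arguments shown in the docstring above (not part of this commit):

import torch
from torchmetrics.classification import MulticlassConfusionMatrix
from torchmetrics.utilities.checks import check_forward_full_state_property

check_forward_full_state_property(
    MulticlassConfusionMatrix,
    init_args={"num_classes": 3},
    input_args={"preds": torch.randint(3, (100,)), "target": torch.randint(3, (100,))},
)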
4 changes: 1 addition & 3 deletions src/torchmetrics/utilities/compute.py
@@ -16,8 +16,6 @@
import torch
from torch import Tensor

from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_9


def _safe_matmul(x: Tensor, y: Tensor) -> Tensor:
"""Safe calculation of matrix multiplication.
@@ -101,7 +99,7 @@ def _auc_compute_without_check(x: Tensor, y: Tensor, direction: float, axis: int
def _auc_compute(x: Tensor, y: Tensor, reorder: bool = False) -> Tensor:
with torch.no_grad():
if reorder:
x, x_idx = torch.sort(x, stable=True) if _TORCH_GREATER_EQUAL_1_9 else torch.sort(x)
x, x_idx = torch.sort(x, stable=True)
y = y[x_idx]

dx = x[1:] - x[:-1]
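With torch >= 1.10 guaranteed, the stable sort is used unconditionally when reorder=True. Stability matters because equal x values keep their original relative order, so the reordered y, and therefore the computed area, stays deterministic. An illustrative sketch, not part of this commit:

import torch

x = torch.tensor([0.3, 0.1, 0.3, 0.2])
y = torch.tensor([1.0, 2.0, 3.0, 4.0])

x_sorted, idx = torch.sort(x, stable=True)
print(x_sorted)  # tensor([0.1000, 0.2000, 0.3000, 0.3000])
print(y[idx])    # tensor([2., 4., 1., 3.]); ties in x keep their original order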
2 changes: 0 additions & 2 deletions src/torchmetrics/utilities/imports.py
@@ -23,8 +23,6 @@
_PYTHON_VERSION = ".".join(map(str, [sys.version_info.major, sys.version_info.minor, sys.version_info.micro]))
_PYTHON_LOWER_3_8 = parse(_PYTHON_VERSION) < Version("3.8")
_TORCH_LOWER_1_12_DEV: Optional[bool] = compare_version("torch", operator.lt, "1.12.0.dev")
_TORCH_GREATER_EQUAL_1_9: Optional[bool] = compare_version("torch", operator.ge, "1.9.0")
_TORCH_GREATER_EQUAL_1_10: Optional[bool] = compare_version("torch", operator.ge, "1.10.0")
_TORCH_GREATER_EQUAL_1_11: Optional[bool] = compare_version("torch", operator.ge, "1.11.0")
_TORCH_GREATER_EQUAL_1_12: Optional[bool] = compare_version("torch", operator.ge, "1.12.0")
_TORCH_GREATER_EQUAL_1_13: Optional[bool] = compare_version("torch", operator.ge, "1.13.0")
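The 1.9 and 1.10 flags are removed because those comparisons are now always true; the remaining flags keep using the same compare_version helper. As an illustrative sketch only, a hypothetical new gate (the flag below is not part of this commit) would follow the identical pattern:

import operator
from lightning_utilities.core.imports import compare_version  # helper this module already relies on

# hypothetical example flag, shown only to document the pattern used above
_TORCH_GREATER_EQUAL_2_2 = compare_version("torch", operator.ge, "2.2.0")

if _TORCH_GREATER_EQUAL_2_2:
    ...  # take the code path that relies on the newer torch API
else:
    ...  # keep the backwards-compatible fallback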
4 changes: 1 addition & 3 deletions tests/unittests/classification/test_calibration_error.py
@@ -29,7 +29,7 @@
multiclass_calibration_error,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_9, _TORCH_GREATER_EQUAL_1_13
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_13

from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases
@@ -228,8 +228,6 @@ def test_multiclass_calibration_error_differentiability(self, inputs):
def test_multiclass_calibration_error_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not _TORCH_GREATER_EQUAL_1_9:
pytest.xfail(reason="torch.max in metric not supported before pytorch v1.9 for cpu + half")
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.softmax in metric does not support cpu + half precision")
self.run_precision_test_cpu(
6 changes: 0 additions & 6 deletions tests/unittests/classification/test_ranking.py
@@ -30,7 +30,6 @@
multilabel_ranking_average_precision,
multilabel_ranking_loss,
)
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_9

from unittests import NUM_CLASSES
from unittests.classification.inputs import _multilabel_cases
@@ -124,11 +123,6 @@ def test_multilabel_ranking_dtype_cpu(self, inputs, metric, functional_metric, r
pytest.xfail(
reason="multilabel_ranking_average_precision requires torch.unique which is not implemented for half"
)
if dtype == torch.half and not _TORCH_GREATER_EQUAL_1_9 and functional_metric == multilabel_coverage_error:
pytest.xfail(
reason="multilabel_coverage_error requires torch.min which is only implemented for half"
" in v1.9 or higher of torch."
)
self.run_precision_test_cpu(
preds=preds,
target=target,
34 changes: 10 additions & 24 deletions tests/unittests/image/test_fid.py
@@ -20,7 +20,7 @@
from torch.nn import Module
from torch.utils.data import Dataset
from torchmetrics.image.fid import FrechetInceptionDistance, NoTrainInceptionV3
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE, _TORCH_GREATER_EQUAL_1_9
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE

torch.manual_seed(42)

@@ -34,7 +34,6 @@ def test_no_train_network_missing_torch_fidelity():
NoTrainInceptionV3(name="inception-v3-compat", features_list=["2048"])


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_no_train():
"""Assert that metric never leaves evaluation mode."""
@@ -53,7 +52,6 @@ def forward(self, x):
assert not model.metric.inception.training, "FID metric was changed to training mode which should not happen"


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_fid_pickle():
"""Assert that we can initialize the metric and pickle it."""
@@ -67,28 +65,21 @@ def test_fid_pickle():

def test_fid_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
if _TORCH_GREATER_EQUAL_1_9:
if _TORCH_FIDELITY_AVAILABLE:
with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
_ = FrechetInceptionDistance(feature=2)
else:
with pytest.raises(
ModuleNotFoundError,
match="FID metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image-quality]` or `pip install torch-fidelity`.",
):
_ = FrechetInceptionDistance()

with pytest.raises(TypeError, match="Got unknown input to argument `feature`"):
_ = FrechetInceptionDistance(feature=[1, 2])
if _TORCH_FIDELITY_AVAILABLE:
with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
_ = FrechetInceptionDistance(feature=2)
else:
with pytest.raises(
ValueError, match="FrechetInceptionDistance metric requires that PyTorch is version 1.9.0 or higher."
ModuleNotFoundError,
match="FID metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image-quality]` or `pip install torch-fidelity`.",
):
_ = FrechetInceptionDistance()

with pytest.raises(TypeError, match="Got unknown input to argument `feature`"):
_ = FrechetInceptionDistance(feature=[1, 2])


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("feature", [64, 192, 768, 2048])
def test_fid_same_input(feature):
@@ -120,7 +111,6 @@ def __len__(self) -> int:


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test is too slow without gpu")
@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("equal_size", [False, True])
def test_compare_fid(tmpdir, equal_size, feature=768):
@@ -157,7 +147,6 @@ def test_compare_fid(tmpdir, equal_size, feature=768):
assert torch.allclose(tm_res.cpu(), torch.tensor([torch_fid["frechet_inception_distance"]]), atol=1e-3)


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
@pytest.mark.parametrize("reset_real_features", [True, False])
def test_reset_real_features_arg(reset_real_features):
"""Test that `reset_real_features` argument works as expected."""
@@ -187,7 +176,6 @@ def test_reset_real_features_arg(reset_real_features):
assert metric.real_features_cov_sum.shape == torch.Size([64, 64])


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
@pytest.mark.parametrize("normalize", [True, False])
def test_normalize_arg(normalize):
"""Test that normalize argument works as expected."""
@@ -206,7 +194,6 @@ def test_normalize_arg(normalize):
metric.update(img, real=True)


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
def test_not_enough_samples():
"""Test that an error is raised if not enough samples were provided."""
img = torch.randint(0, 255, (1, 3, 299, 299), dtype=torch.uint8)
@@ -219,7 +206,6 @@ def test_not_enough_samples():
metric.compute()


@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_9, reason="test requires torch>=1.9")
def test_dtype_transfer_to_submodule():
"""Test that change in dtype also changes the default inception net."""
imgs = torch.randn(1, 3, 256, 256)
4 changes: 1 addition & 3 deletions tests/unittests/image/test_lpips.py
@@ -19,7 +19,7 @@
from lpips import LPIPS as LPIPS_reference # noqa: N811
from torch import Tensor
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from torchmetrics.utilities.imports import _LPIPS_AVAILABLE, _TORCH_GREATER_EQUAL_1_9
from torchmetrics.utilities.imports import _LPIPS_AVAILABLE

from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
@@ -73,8 +73,6 @@ def test_lpips_differentiability(self):
# LPIPS half + cpu does not work due to missing support in torch.min for older version of torch
def test_lpips_half_cpu(self):
"""Test for half + cpu support."""
if not _TORCH_GREATER_EQUAL_1_9:
pytest.xfail(reason="LPIPS metric does not support cpu + half precision for v1.8.1 or lower of Pytorch")
self.run_precision_test_cpu(_inputs.img1, _inputs.img2, LearnedPerceptualImagePatchSimilarity)

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")