test_functional.py
from typing import Iterable, Union

import pytest
import torch
import torch.nn.functional as f

from fft_conv_pytorch.fft_conv import fft_conv, to_ntuple
from fft_conv_pytorch.utils import _assert_almost_equal, _gcd
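

# Both tests sweep channel counts, kernel size, padding, stride, dilation,
# groups, dimensionality, and input size, and compare fft_conv against the
# corresponding torch.nn.functional.conv{1,2,3}d reference implementation.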
@pytest.mark.parametrize("in_channels", [2, 3])
@pytest.mark.parametrize("out_channels", [2, 3])
@pytest.mark.parametrize("groups", [1, 2, 3])
@pytest.mark.parametrize("kernel_size", [2, 3])
@pytest.mark.parametrize("padding", [0, 1, "same"])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("dilation", [1, 2])
@pytest.mark.parametrize("bias", [True])
@pytest.mark.parametrize("ndim", [1, 2, 3])
@pytest.mark.parametrize("input_size", [7, 8])
def test_fft_conv_functional(
    in_channels: int,
    out_channels: int,
    kernel_size: Union[int, Iterable[int]],
    padding: Union[int, Iterable[int]],
    stride: Union[int, Iterable[int]],
    dilation: Union[int, Iterable[int]],
    groups: int,
    bias: bool,
    ndim: int,
    input_size: int,
):
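    """The forward pass of fft_conv should match the direct torch convolution."""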
    if padding == "same" and (stride != 1 or dilation != 1):
        # padding='same' is not compatible with strided convolutions, and this
        # test also skips the dilated cases.
        return

    torch_conv = getattr(f, f"conv{ndim}d")
    groups = _gcd(in_channels, _gcd(out_channels, groups))

    batch_size = 2  # TODO: Make this non-constant?
    dims = ndim * [input_size]
    signal = torch.randn(batch_size, in_channels, *dims)
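    # Identical weight / bias tensors for fft_conv and the reference torch
    # convolution; each is cloned so the two calls remain independent.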
    kernel_size = to_ntuple(kernel_size, n=signal.ndim - 2)
    w0 = torch.randn(
        out_channels, in_channels // groups, *kernel_size, requires_grad=True
    )
    w1 = w0.detach().clone().requires_grad_()
    b0 = torch.randn(out_channels, requires_grad=True) if bias else None
    b1 = b0.detach().clone().requires_grad_() if bias else None

    kwargs = dict(
        padding=padding,
        stride=stride,
        dilation=dilation,
        groups=groups,
    )
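    # Outputs of the two implementations should agree up to floating-point error.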
    y0 = fft_conv(signal, w0, bias=b0, **kwargs)
    y1 = torch_conv(signal, w1, bias=b1, **kwargs)
    _assert_almost_equal(y0, y1)


@pytest.mark.parametrize("in_channels", [2, 3])
@pytest.mark.parametrize("out_channels", [2, 3])
@pytest.mark.parametrize("groups", [1, 2, 3])
@pytest.mark.parametrize("kernel_size", [2, 3])
@pytest.mark.parametrize("padding", [0, 1, "same"])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("dilation", [1, 2])
@pytest.mark.parametrize("bias", [True])
@pytest.mark.parametrize("ndim", [1, 2, 3])
@pytest.mark.parametrize("input_size", [7, 8])
def test_fft_conv_backward_functional(
    in_channels: int,
    out_channels: int,
    kernel_size: Union[int, Iterable[int]],
    padding: Union[int, Iterable[int]],
    stride: Union[int, Iterable[int]],
    dilation: Union[int, Iterable[int]],
    groups: int,
    bias: bool,
    ndim: int,
    input_size: int,
):
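    """Gradients from fft_conv should match those of the direct torch convolution."""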
    if padding == "same" and (stride != 1 or dilation != 1):
        # padding='same' is not compatible with strided convolutions, and this
        # test also skips the dilated cases.
        return

    torch_conv = getattr(f, f"conv{ndim}d")
    groups = _gcd(in_channels, _gcd(out_channels, groups))

    batch_size = 2  # TODO: Make this non-constant?
    dims = ndim * [input_size]
    signal = torch.randn(batch_size, in_channels, *dims)
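    # Identical weight / bias tensors for both implementations, cloned so that
    # each call accumulates gradients into its own leaf tensors.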
    kernel_size = to_ntuple(kernel_size, n=signal.ndim - 2)
    w0 = torch.randn(
        out_channels, in_channels // groups, *kernel_size, requires_grad=True
    )
    w1 = w0.detach().clone().requires_grad_()
    b0 = torch.randn(out_channels, requires_grad=True) if bias else None
    b1 = b0.detach().clone().requires_grad_() if bias else None

    kwargs = dict(
        padding=padding,
        stride=stride,
        dilation=dilation,
        groups=groups,
    )
    y0 = fft_conv(signal, w0, bias=b0, **kwargs)
    y1 = torch_conv(signal, w1, bias=b1, **kwargs)

    # Compute pseudo-loss and gradient
    y0.sum().backward()
    y1.sum().backward()

    _assert_almost_equal(w0.grad, w1.grad)
    if bias:
        _assert_almost_equal(b0.grad, b1.grad)