Skip to content

Commit

Permalink
formatting changes from black 22.3.0
Browse files Browse the repository at this point in the history
Summary:
Applies the black-fbsource codemod with the new build of pyfmt.

paintitblack

Reviewed By: lisroach

Differential Revision: D36324783

fbshipit-source-id: 280c09e88257e5e569ab729691165d8dedd767bc
  • Loading branch information
amyreese authored and facebook-github-bot committed May 12, 2022
1 parent d74ea39 commit e4f0b3d
Show file tree
Hide file tree
Showing 5 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion fvcore/common/param_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def __init__(
self._decay = decay

def __call__(self, where: float) -> float:
-        return self._start_value * (self._decay ** where)
+        return self._start_value * (self._decay**where)


class LinearParamScheduler(ParamScheduler):
Expand Down
2 changes: 1 addition & 1 deletion fvcore/nn/smooth_l1_loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def smooth_l1_loss(
else:
n = torch.abs(input - target)
cond = n < beta
-        loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
+        loss = torch.where(cond, 0.5 * n**2 / beta, n - 0.5 * beta)

if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
Expand Down
6 changes: 3 additions & 3 deletions tests/test_flop_count.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,8 +361,8 @@ def _test_conv(
batch_size
* input_dim
* output_dim
-            * (kernel_size ** conv_dim)
-            * (spatial_size ** conv_dim)
+            * (kernel_size**conv_dim)
+            * (spatial_size**conv_dim)
/ group_size
/ 1e9
)
Expand Down Expand Up @@ -905,7 +905,7 @@ def test_upsample(self):
nodes = self._count_function(
F.interpolate, (torch.rand(2, 2, 2, 2), None, 2, "bilinear", False), op_name
)
-        self.assertEqual(counter(*nodes), 2 ** 4 * 4 * 4)
+        self.assertEqual(counter(*nodes), 2**4 * 4 * 4)

def test_complicated_einsum(self):
op_name = "aten::einsum"
Expand Down
2 changes: 1 addition & 1 deletion tests/test_smooth_l1_loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def test_smooth_l1_loss(self) -> None:
targets = torch.tensor([1.1, 2, 4.5], dtype=torch.float32)
beta = 0.5
loss = smooth_l1_loss(inputs, targets, beta=beta, reduction="none").numpy()
-        self.assertTrue(np.allclose(loss, [0.5 * 0.1 ** 2 / beta, 0, 1.5 - 0.5 * beta]))
+        self.assertTrue(np.allclose(loss, [0.5 * 0.1**2 / beta, 0, 1.5 - 0.5 * beta]))

beta = 0.05
loss = smooth_l1_loss(inputs, targets, beta=beta, reduction="none").numpy()
Expand Down
4 changes: 2 additions & 2 deletions tests/test_weight_init.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,9 @@ def test_conv_weight_init(self) -> None:
if layer is nn.Conv1d:
spatial_dim = k_size
elif layer is nn.Conv2d:
-                spatial_dim = k_size ** 2
+                spatial_dim = k_size**2
elif layer is nn.Conv3d:
-                spatial_dim = k_size ** 3
+                spatial_dim = k_size**3

# Calculate fan_in and fan_out.
# pyre-fixme[61]: `spatial_dim` may not be initialized here.
Expand Down

0 comments on commit e4f0b3d

Please sign in to comment.