
Commit

Fix doctest not raising when failed (#5100)
* fix doctest not raising

* fix case

* fix

* add check

* refine

Co-authored-by: oneflow-ci-bot <[email protected]>
jackalcooper and oneflow-ci-bot authored Jun 4, 2021
1 parent d7b2570 commit 1f9db98
Showing 40 changed files with 178 additions and 200 deletions.
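
Why this matters: by default `doctest.testmod()` only prints a failure report and returns a `TestResults(failed, attempted)` tuple, so a module whose doctests fail can still exit with status 0 and CI stays green. With `raise_on_error=True`, the first failing example raises `doctest.DocTestFailure` (or `doctest.UnexpectedException` if the example itself throws), so the run aborts with a non-zero exit status. A minimal sketch of the difference, using a made-up module that is not part of this commit:

# sketch.py -- hypothetical example, not from the OneFlow tree
import doctest


def add(a, b):
    """
    >>> add(1, 2)
    4
    """
    # The expected output above is deliberately wrong, to show a failing example.
    return a + b


if __name__ == "__main__":
    # Default behaviour: the mismatch is printed, testmod() returns
    # TestResults(failed=1, attempted=1), and the script still exits 0.
    # doctest.testmod()

    # With raise_on_error=True the first failure raises doctest.DocTestFailure,
    # so the interpreter exits with a traceback and a non-zero status.
    doctest.testmod(raise_on_error=True)

The per-module diffs below apply exactly this substitution.
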
18 changes: 10 additions & 8 deletions ci/check/run_license_format.py
@@ -35,12 +35,14 @@ def check_file(path):
    with open(path) as f:
        content = f.read()
    txt = get_txt(path)
-    if content.count("The OneFlow Authors. All rights reserved.") > 1:
-        return ("duplicated", content)
-    if content.startswith(txt) or (not content):
+    if "import doctest" in content and "raise_on_error=True" not in content:
+        return ("please add 'doctest.testmod(raise_on_error=True)'", content)
+    elif content.count("The OneFlow Authors. All rights reserved.") > 1:
+        return ("license_duplicated", content)
+    elif content.startswith(txt) or (not content):
        return ("ok", content)
-    else:
-        return ("absent", content)
+    elif content.startswith(txt) == False:
+        return ("license_absent", content)


def format_file(path):
@@ -50,13 +52,13 @@ def format_file(path):
    format_status, content = check_file(path)
    if format_status == "ok":
        return True
-    elif format_status == "absent":
+    elif format_status == "license_absent":
        with open(path, "w") as w:
            new_content = txt + content
            w.write(new_content)
        return False
    else:
-        raise ValueError(f"license {format_status} {path}")
+        raise ValueError(f"{format_status} {path}")


def do_check(x):
@@ -97,7 +99,7 @@ def glob_files(path):
    any_absence = False
    for (p, format_status) in p.map(do_check, files):
        if format_status != "ok":
-            print(f"license {format_status}:", p)
+            print(f"{format_status}:", p)
            any_absence = True
    if any_absence:
        exit(1)
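
With the new first branch in `check_file`, the CI script also enforces the convention: a file that contains `import doctest` but never passes `raise_on_error=True` gets the reminder status above; `format_file` refuses to auto-fix such a file (it raises `ValueError` for any status other than `ok`/`license_absent`), and the final check loop treats any non-`ok` status as a failure, printing the path and exiting with status 1. The rule itself is a plain substring test; a standalone sketch of the same condition (the helper name is mine, not from the script):

# Hypothetical helper mirroring the condition added to check_file above.
def needs_raise_on_error(content: str) -> bool:
    # Flag files that use doctest but never opt in to raise_on_error=True.
    return "import doctest" in content and "raise_on_error=True" not in content


assert needs_raise_on_error("import doctest\ndoctest.testmod()\n")
assert not needs_raise_on_error("import doctest\ndoctest.testmod(raise_on_error=True)\n")
assert not needs_raise_on_error("print('no doctests here')\n")
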
4 changes: 2 additions & 2 deletions oneflow/python/nn/modules/abs.py
@@ -50,12 +50,12 @@ def abs_op(x):
        >>> x = flow.Tensor(np.array([-1, 2, -3, 4]).astype(np.float32))
        >>> flow.abs(x)
        tensor([1., 2., 3., 4.], dtype=oneflow.float32)

    """
    return Abs()(x)


if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
10 changes: 5 additions & 5 deletions oneflow/python/nn/modules/acosh.py
@@ -33,7 +33,7 @@ def forward(self, x):
@experimental_api
def acosh_op(x):
    r"""Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.

    .. math::

        \text{out}_{i} = \cosh^{-1}(\text{input}_{i})
@@ -70,7 +70,7 @@ def acosh_op_tensor(x):
    acosh() -> Tensor

    See :func:`oneflow.experimental.acosh`

    """

    return Acosh()(x)
@@ -82,7 +82,7 @@ def arccosh_op(x):
    r"""
    See :func:`oneflow.experimental.acosh`
    """

    return Acosh()(x)
@@ -96,7 +96,7 @@ def arccosh_op_tensor(x):
    arccosh() -> Tensor

    See :func:`oneflow.experimental.acosh`

    """

    return Acosh()(x)
@@ -105,4 +105,4 @@ def arccosh_op_tensor(x):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
8 changes: 4 additions & 4 deletions oneflow/python/nn/modules/activation.py
@@ -462,7 +462,7 @@ class Hardsigmoid(Module):
        >>> out = hardsigmoid(input).numpy()
        >>> print(out)
        [0.41666666 0.5 0.5833333 ]

    """

@@ -759,7 +759,7 @@ class Hardswish(Module):
        >>> out = hardswish(input).numpy()
        >>> print(out)
        [-0.20833333 0. 0.29166666]

    .. _`Searching for MobileNetV3`:
        https://arxiv.org/abs/1905.02244
    """
Expand Down Expand Up @@ -812,7 +812,7 @@ class Hardtanh(Module):
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> m = flow.nn.Hardtanh()
>>> arr = np.array([0.2, 0.3, 3.0, 4.0])
>>> x = flow.Tensor(arr)
@@ -914,4 +914,4 @@ def forward(self, x):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
6 changes: 3 additions & 3 deletions oneflow/python/nn/modules/arange.py
@@ -87,9 +87,9 @@ def arange_op(
        device(flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.

-    For example:
+    For example:

-    .. code-block:: python
+    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
@@ -108,4 +108,4 @@ def arange_op(
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
4 changes: 2 additions & 2 deletions oneflow/python/nn/modules/argmax.py
@@ -88,7 +88,7 @@ def argmax_op(input, dim: int = None, keepdim: bool = False):
    For example:

-    .. code-block:: python
+    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow.experimental as flow
@@ -111,4 +111,4 @@ def argmax_op(input, dim: int = None, keepdim: bool = False):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
8 changes: 4 additions & 4 deletions oneflow/python/nn/modules/atan2.py
@@ -41,7 +41,7 @@ def forward(self, x, y):
@experimental_api
def atan2_op(input, other):
    r"""Element-wise arctangent of input{i}/other{i}
-    with consideration of the quadrant. Returns a new tensor with the signed
+    with consideration of the quadrant. Returns a new tensor with the signed
    angles in radians between vector (other{i},input{i}) and vector (1, 0).
    The shapes of input and other must be broadcastable.
@@ -57,14 +57,14 @@ def atan2_op(input, other):
        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> x1 = flow.Tensor(np.array([1,2,3]))
        >>> y1 = flow.Tensor(np.array([3,2,1]))
        >>> x2 = flow.Tensor(np.array([1.53123589,0.54242598,0.15117185]))
        >>> y2 = flow.Tensor(np.array([-0.21906378,0.09467151,-0.75562878]))
        >>> x3 = flow.Tensor(np.array([1,0,-1]))
        >>> y3 = flow.Tensor(np.array([0,1,0]))
        >>> flow.enable_eager_execution()
        >>> flow.atan2(x1,y1).numpy()
        array([0.32175055, 0.7853982 , 1.2490457 ], dtype=float32)
@@ -92,4 +92,4 @@ def atan2_op_tensor(input, other):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
2 changes: 1 addition & 1 deletion oneflow/python/nn/modules/batchnorm.py
@@ -363,4 +363,4 @@ def _check_input_dim(self, input):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
2 changes: 1 addition & 1 deletion oneflow/python/nn/modules/cast.py
@@ -68,4 +68,4 @@ def cast_op(x, dtype):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
4 changes: 2 additions & 2 deletions oneflow/python/nn/modules/concat.py
@@ -61,7 +61,7 @@ def concat_op(inputs, dim=0):
    Args:
        inputs: a `list` of `Tensor`
-        dim: a `int`.
+        dim: a `int`.

    Returns:
        A `Tensor`
@@ -90,4 +90,4 @@ def concat_op(inputs, dim=0):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
2 changes: 1 addition & 1 deletion oneflow/python/nn/modules/constant.py
@@ -212,4 +212,4 @@ def ones_like_op(other):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
2 changes: 1 addition & 1 deletion oneflow/python/nn/modules/container.py
@@ -534,4 +534,4 @@ def _replicate_for_data_parallel(self):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
6 changes: 3 additions & 3 deletions oneflow/python/nn/modules/dropout.py
@@ -65,9 +65,9 @@ class Dropout(_DropoutNd):
        - Input: :math:`(*)`. Input can be of any shape
        - Output: :math:`(*)`. Output is of the same shape as input

-    For example:
+    For example:

-    .. code-block:: python
+    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow.experimental as flow
@@ -127,4 +127,4 @@ def forward(self, x):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
4 changes: 2 additions & 2 deletions oneflow/python/nn/modules/eq.py
@@ -57,7 +57,7 @@ def eq_op(input, other):
        other (oneflow.Tensor): the tensor to compare

    Returns:
        - A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere

    For example:
@@ -82,4 +82,4 @@ def eq_op(input, other):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
10 changes: 5 additions & 5 deletions oneflow/python/nn/modules/exp.py
@@ -34,9 +34,9 @@ def forward(self, x):
def exp_op(x):
    """This operator computes the exponential of Tensor.

-    The equation is:
+    The equation is:

-    .. math::
+    .. math::

        out = e^x
@@ -46,9 +46,9 @@ def exp_op(x):
    Returns:
        oneflow.Tensor: The result Tensor

-    For example:
+    For example:

-    .. code-block:: python
+    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow.experimental as flow
@@ -66,4 +66,4 @@ def exp_op(x):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
16 changes: 8 additions & 8 deletions oneflow/python/nn/modules/expand.py
@@ -66,21 +66,21 @@ def forward(self, x):
@experimental_api
def expand_op(x, *sizes):
    """This operator expand the input tensor to a larger size.

    Passing -1 as the size for a dimension means not changing the size of that dimension.

-    Tensor can be also expanded to a larger number of dimensions and the new ones will be appended at the front.
-    For the new dimensions, the size cannot be set to -1.
+    Tensor can be also expanded to a larger number of dimensions and the new ones will be appended at the front.
+    For the new dimensions, the size cannot be set to -1.

    Args:
-        x (oneflow.Tensor): The input Tensor.
+        x (oneflow.Tensor): The input Tensor.
        *sizes (flow.Size or int): The desired expanded size.

    Returns:
-        oneflow.Tensor: The result Tensor.
+        oneflow.Tensor: The result Tensor.

-    For example:
+    For example:

    .. code-block:: python
@@ -104,4 +104,4 @@ def expand_op(x, *sizes):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
8 changes: 4 additions & 4 deletions oneflow/python/nn/modules/gather.py
@@ -71,7 +71,7 @@ def gather_op(input, index, dim=0, sparse_grad=False):
        out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
        out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
        out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2

    :attr:`input` and :attr:`index` must have the same number of dimensions.
@@ -83,11 +83,11 @@ def gather_op(input, index, dim=0, sparse_grad=False):
        input (Tensor): the source tensor
        dim (int): the axis along which to index
        index (LongTensor): the indices of elements to gather

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
@@ -105,4 +105,4 @@ def gather_op(input, index, dim=0, sparse_grad=False):
if __name__ == "__main__":
    import doctest

-    doctest.testmod()
+    doctest.testmod(raise_on_error=True)
