diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3822aac147fc0..a395e41b926f8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,6 +26,6 @@ repos: # Exclude everything in frontends except __init__.py, and func_wrapper.py exclude: 'ivy/functional/(frontends|backends)/(?!.*/func_wrapper\.py$).*(?!__init__\.py$)' - repo: https://github.com/unifyai/lint-hook - rev: 956eb831454d5a4fc3138467b85aedbf634140f0 + rev: a90659d806c6d65f20ec41095a2da8e8920cc96f hooks: - id: ivy-lint diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py index 91e1a2d7baf87..9d0b890841772 100644 --- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py +++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py @@ -7,10 +7,6 @@ # --------------- # -# --- Helpers --- # -# --------------- # - - def _swap_direction(norm): try: return _SWAP_DIRECTION_MAP[norm] diff --git a/ivy/functional/frontends/numpy/ma/MaskedArray.py b/ivy/functional/frontends/numpy/ma/MaskedArray.py index d59c1633f6604..34ad474a5b58a 100644 --- a/ivy/functional/frontends/numpy/ma/MaskedArray.py +++ b/ivy/functional/frontends/numpy/ma/MaskedArray.py @@ -2,7 +2,6 @@ import ivy.functional.frontends.numpy as np_frontend import numpy as np -masked = True masked_print_options = "--" nomask = False @@ -194,10 +193,12 @@ def _array_in_str(self): def _is_masked_array(x): return isinstance(x, (np.ma.MaskedArray, np_frontend.ma.MaskedArray)) - # Instance Methods # - # ---------------- # - # TODO +masked = True +# Instance Methods # +# ---------------- # + +# TODO # masked_array (alias) diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py index 5c46ccc7fa1e5..ce38aa3b4f19f 100644 --- a/ivy/functional/frontends/tensorflow/raw_ops.py +++ b/ivy/functional/frontends/tensorflow/raw_ops.py @@ -14,7 +14,6 @@ Acos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos)) Acosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh)) -Add = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add)) AddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n)) AddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add)) ArgMax = to_ivy_arrays_and_back( @@ -220,7 +219,6 @@ ) Sin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin)) Size = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size)) -Slice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice)) Softmax = to_ivy_arrays_and_back( with_unsupported_dtypes( { @@ -253,7 +251,6 @@ Squeeze = to_ivy_arrays_and_back( map_raw_ops_alias(tf_frontend.general_functions.squeeze) ) -Sub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract)) Tan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan)) Tanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh)) Xlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy)) @@ -831,3 +828,8 @@ def Xlog1py(*, x, y, name="Xlog1py"): @to_ivy_arrays_and_back def ZerosLike(*, x, name="ZerosLike"): return ivy.zeros_like(x) + + +Add = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add)) +Slice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice)) +Sub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract)) diff --git a/ivy/functional/frontends/torch/comparison_ops.py 
b/ivy/functional/frontends/torch/comparison_ops.py index 5eb04bad6d727..eeaadadfbf864 100644 --- a/ivy/functional/frontends/torch/comparison_ops.py +++ b/ivy/functional/frontends/torch/comparison_ops.py @@ -289,8 +289,8 @@ def topk(input, k, dim=None, largest=True, sorted=True, *, out=None): return ivy.top_k(input, k, axis=dim, largest=largest, sorted=sorted, out=out) -ge = greater_equal gt = greater +ne = not_equal +ge = greater_equal le = less_equal lt = less -ne = not_equal diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_nn/test_non_linear_activations.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_nn/test_non_linear_activations.py index ef904c7854dde..f5984c3d9083d 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_nn/test_non_linear_activations.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_nn/test_non_linear_activations.py @@ -106,9 +106,11 @@ def test_jax_elu( fn_tree="jax.nn.gelu", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_complex"), + large_abs_safety_factor=1, + small_abs_safety_factor=1, + safety_factor_scale="linear", min_value=-1e4, max_value=1e4, - abs_smallest_val=1e-3, ), approximate=st.booleans(), test_with_out=st.just(False), @@ -125,7 +127,7 @@ def test_jax_gelu( ): input_dtype, x = dtype_and_x # As erf function doesn't support complex dtype - if "complex" in input_dtype[0]: + if "complex" in str(x[0].dtype): approximate = True helpers.test_frontend_function( input_dtypes=input_dtype, @@ -134,8 +136,8 @@ def test_jax_gelu( test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - rtol=1e-2, - atol=1e-2, + rtol=1e-02, + atol=1e-02, x=x[0], approximate=approximate, ) @@ -315,9 +317,7 @@ def test_jax_hard_tanh( small_abs_safety_factor=2, safety_factor_scale="linear", ), - negative_slope=helpers.floats( - min_value=0.0, max_value=1.0, small_abs_safety_factor=16 - ), + negative_slope=helpers.floats(min_value=0.0, max_value=1.0), test_with_out=st.just(False), ) def test_jax_leaky_relu( @@ -735,9 +735,9 @@ def test_jax_soft_sign( @handle_frontend_test( fn_tree="jax.nn.softmax", dtype_x_axis=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, - max_axes_size=2, + max_axes_size=1, force_int_axis=True, valid_axis=True, ), diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_mathematical_functions.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_mathematical_functions.py index 65be2aa2b07c3..852293a989fdb 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_mathematical_functions.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_mathematical_functions.py @@ -543,46 +543,6 @@ def test_jax_ceil( ) -# clip -@handle_frontend_test( - fn_tree="jax.numpy.clip", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_integer"), - min_value=-1e3, - max_value=1e3, - max_dim_size=10, - max_num_dims=4, - min_dim_size=1, - min_num_dims=1, - ), - a_min=st.integers(min_value=0, max_value=5), - a_max=st.integers(min_value=5, max_value=10), -) -def test_jax_clip( - *, - dtype_and_x, - a_min, - a_max, - on_device, - fn_tree, - frontend, - backend_fw, - test_flags, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=input_dtype, - frontend=frontend, - backend_to_test=backend_fw, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - a=x[0], - a_min=a_min, - a_max=a_max, - 
) - - # conj @handle_frontend_test( fn_tree="jax.numpy.conj", @@ -1486,49 +1446,6 @@ def test_jax_gcd( ) -# gradient -@handle_frontend_test( - fn_tree="jax.numpy.gradient", - dtype_input_axis=helpers.dtype_values_axis( - available_dtypes=("float32", "float16", "float64"), - min_num_dims=1, - max_num_dims=3, - min_dim_size=2, - max_dim_size=4, - valid_axis=True, - force_int_axis=True, - ), - varargs=helpers.ints( - min_value=-3, - max_value=3, - ), -) -def test_jax_gradient( - dtype_input_axis, - varargs, - frontend, - backend_fw, - test_flags, - fn_tree, - on_device, -): - input_dtype, x, axis = dtype_input_axis - test_flags.num_positional_args = 2 - kw = {} - kw["varargs"] = varargs - kw["axis"] = axis - helpers.test_frontend_function( - input_dtypes=input_dtype, - frontend=frontend, - backend_to_test=backend_fw, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - f=x[0], - **kw, - ) - - # heaviside @handle_frontend_test( fn_tree="jax.numpy.heaviside", diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_random.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_random.py index 692dbccc62c62..10ddbd13320db 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_random.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_random.py @@ -820,62 +820,6 @@ def call(): assert u.shape == v.shape -@pytest.mark.xfail -@handle_frontend_test( - fn_tree="jax.random.logistic", - dtype_key=helpers.dtype_and_values( - available_dtypes=["uint32"], - min_value=0, - max_value=2000, - min_num_dims=1, - max_num_dims=1, - min_dim_size=2, - max_dim_size=2, - ), - shape=helpers.get_shape(allow_none=False, min_num_dims=1, min_dim_size=1), - dtype=helpers.get_dtypes("float", full=False), - test_with_out=st.just(False), -) -def test_jax_logistic( - *, - dtype_key, - shape, - dtype, - on_device, - fn_tree, - frontend, - backend_fw, - test_flags, -): - input_dtype, key = dtype_key - - def call(): - return helpers.test_frontend_function( - input_dtypes=input_dtype, - frontend=frontend, - backend_to_test=backend_fw, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - key=key[0], - shape=shape, - dtype=dtype[0], - test_values=False, - ) - - ret = call() - - if not ivy.exists(ret): - return - - ret_np, ret_from_np = ret - ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw) - ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw) - for u, v in zip(ret_np, ret_from_np): - assert u.dtype == v.dtype - assert u.shape == v.shape - - @pytest.mark.xfail @handle_frontend_test( fn_tree="jax.random.maxwell", diff --git a/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_function/test_mindspore_nn_func.py b/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_function/test_mindspore_nn_func.py index 17919064ef663..fe825eda7098f 100644 --- a/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_function/test_mindspore_nn_func.py +++ b/ivy_tests/test_ivy/test_frontends/test_mindspore/test_ops/test_function/test_mindspore_nn_func.py @@ -665,52 +665,6 @@ def test_mindspore_log_softmax( # ) -# max_pool3d -@pytest.mark.skip("Testing pipeline not yet implemented") -@handle_frontend_test( - fn_tree="mindspore.ops.function.nn_func.max_pool3d", - x_k_s_p=helpers.arrays_for_pooling( - min_dims=5, - max_dims=5, - min_side=1, - max_side=4, - only_explicit_padding=True, - return_dilation=True, - data_format="channel_first", - ), - test_with_out=st.just(False), - ceil_mode=st.sampled_from([True, False]), -) -def 
test_mindspore_max_pool3d( - x_k_s_p, - ceil_mode, - *, - test_flags, - frontend, - backend_fw, - fn_tree, - on_device, -): - input_dtypes, x, kernel_size, stride, padding, dilation = x_k_s_p - - padding = (padding[0][0], padding[1][0], padding[2][0]) - - helpers.test_frontend_function( - input_dtypes=input_dtypes, - backend_to_test=backend_fw, - test_flags=test_flags, - frontend=frontend, - fn_tree=fn_tree, - on_device=on_device, - input=x[0], - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - ceil_mode=ceil_mode, - ) - - # pad @pytest.mark.skip("Testing pipeline not yet implemented") @handle_frontend_test( diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py index 9e121cc13268c..cf81f0b7bdf41 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py @@ -5,8 +5,8 @@ import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test from ivy_tests.test_ivy.test_functional.test_experimental.test_nn.test_layers import ( - _x_and_ifft, - _x_and_rfftn, + x_and_ifft, + x_and_rfftn, ) @@ -90,7 +90,7 @@ def test_numpy_fftshift( @handle_frontend_test( fn_tree="numpy.fft.ifft", - dtype_and_x=_x_and_ifft(), + dtype_and_x=x_and_ifft(), ) def test_numpy_ifft(dtype_and_x, backend_fw, frontend, test_flags, fn_tree, on_device): input_dtype, x, dim, norm, n = dtype_and_x @@ -111,7 +111,7 @@ def test_numpy_ifft(dtype_and_x, backend_fw, frontend, test_flags, fn_tree, on_d @handle_frontend_test( fn_tree="numpy.fft.ifftn", - dtype_and_x=_x_and_ifft(), + dtype_and_x=x_and_ifft(), ) def test_numpy_ifftn(dtype_and_x, backend_fw, frontend, test_flags, fn_tree, on_device): input_dtype, x, dim, norm, n = dtype_and_x @@ -237,7 +237,7 @@ def test_numpy_rfftfreq( @handle_frontend_test( fn_tree="numpy.fft.rfftn", - dtype_and_x=_x_and_rfftn(), + dtype_and_x=x_and_rfftn(), ) def test_numpy_rfftn(dtype_and_x, frontend, backend_fw, test_flags, fn_tree, on_device): dtype, x, s, axes, norm = dtype_and_x diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py index 4fc4230dcb56f..b2cae02335ca7 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py @@ -2,7 +2,6 @@ from hypothesis import strategies as st, assume import numpy as np - # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test @@ -146,37 +145,6 @@ def test_numpy_chisquare( ) -@handle_frontend_test( - fn_tree="numpy.random.choice", - dtypes=helpers.get_dtypes("float", full=False), - a=helpers.ints(min_value=2, max_value=10), - size=helpers.get_shape(allow_none=True), -) -def test_numpy_choice( - dtypes, - size, - frontend, - test_flags, - backend_fw, - fn_tree, - on_device, - a, -): - helpers.test_frontend_function( - input_dtypes=dtypes, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - test_values=False, - a=a, - size=size, - replace=True, - p=np.array([1 / a] * a, dtype=dtypes[0]), - ) - - # dirichlet @handle_frontend_test( fn_tree="numpy.random.dirichlet", @@ -385,51 +353,6 
@@ def test_numpy_gumbel( ) -@handle_frontend_test( - fn_tree="numpy.random.laplace", - input_dtypes=helpers.get_dtypes("float", full=False), - loc=st.floats( - allow_nan=False, - allow_infinity=False, - width=32, - min_value=0, - exclude_min=True, - ), - scale=st.floats( - allow_nan=False, - allow_infinity=False, - width=32, - min_value=0, - exclude_min=True, - ), - size=helpers.get_shape(allow_none=True), - test_with_out=st.just(False), -) -def test_numpy_laplace( - input_dtypes, - size, - frontend, - test_flags, - fn_tree, - on_device, - backend_fw, - loc, - scale, -): - helpers.test_frontend_function( - input_dtypes=input_dtypes, - backend_to_test=backend_fw, - test_flags=test_flags, - frontend=frontend, - fn_tree=fn_tree, - on_device=on_device, - test_values=False, - loc=loc, - scale=scale, - size=size, - ) - - # logistic @handle_frontend_test( fn_tree="numpy.random.logistic", @@ -516,42 +439,6 @@ def test_numpy_lognormal( ) -@handle_frontend_test( - fn_tree="numpy.random.logseries", - input_dtypes=helpers.get_dtypes("float", index=2), - p=st.floats( - allow_nan=False, - allow_infinity=False, - min_value=0, - max_value=1, - exclude_max=True, - ), - size=helpers.get_shape(allow_none=True), - test_with_out=st.just(False), -) -def test_numpy_logseries( - input_dtypes, - frontend, - test_flags, - fn_tree, - backend_fw, - on_device, - p, - size, -): - helpers.test_frontend_function( - input_dtypes=input_dtypes, - backend_to_test=backend_fw, - test_flags=test_flags, - frontend=frontend, - fn_tree=fn_tree, - on_device=on_device, - test_values=False, - p=p, - size=size, - ) - - # multinomial @handle_frontend_test( fn_tree="numpy.random.multinomial", @@ -959,41 +846,6 @@ def test_numpy_standard_normal( ) -# standard_t -@handle_frontend_test( - fn_tree="numpy.random.standard_t", - df=st.floats(min_value=1, max_value=20), - df_dtypes=helpers.get_dtypes("integer", full=False), - size=st.tuples( - st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5) - ), - size_dtypes=helpers.get_dtypes("integer", full=False), - test_with_out=st.just(False), -) -def test_numpy_standard_t( - df, - df_dtypes, - size, - size_dtypes, - frontend, - test_flags, - fn_tree, - backend_fw, - on_device, -): - helpers.test_frontend_function( - input_dtypes=df_dtypes + size_dtypes, - backend_to_test=backend_fw, - test_flags=test_flags, - frontend=frontend, - fn_tree=fn_tree, - on_device=on_device, - test_values=False, - df=df, - size=size, - ) - - @handle_frontend_test( fn_tree="numpy.random.triangular", input_dtypes=helpers.get_dtypes("float"), @@ -1167,40 +1019,3 @@ def test_numpy_weibull( a=a, size=size, ) - - -@handle_frontend_test( - fn_tree="numpy.random.zipf", - input_dtypes=helpers.get_dtypes("float", index=2), - a=st.floats( - allow_nan=False, - allow_infinity=False, - width=32, - min_value=1, - max_value=1000, - exclude_min=True, - ), - size=helpers.get_shape(allow_none=True), - test_with_out=st.just(False), -) -def test_numpy_zipf( - input_dtypes, - frontend, - test_flags, - backend_fw, - fn_tree, - on_device, - a, - size, -): - helpers.test_frontend_function( - input_dtypes=input_dtypes, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - test_values=False, - a=a, - size=size, - ) diff --git a/ivy_tests/test_ivy/test_frontends/test_onnx/test_elementwise.py b/ivy_tests/test_ivy/test_frontends/test_onnx/test_elementwise.py index 50cf23e71c94d..4d4108730bd9c 100644 --- 
a/ivy_tests/test_ivy/test_frontends/test_onnx/test_elementwise.py +++ b/ivy_tests/test_ivy/test_frontends/test_onnx/test_elementwise.py @@ -14,7 +14,7 @@ @pytest.mark.skip("Testing pipeline not yet implemented") @handle_frontend_test( - fn_tree="onnx.Abs", + fn_tree="onnx.abs", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric", full=False), large_abs_safety_factor=2.5, @@ -70,7 +70,7 @@ def test_onnx_abs_v2(dtype_x): @pytest.mark.skip("Testing pipeline not yet implemented") @handle_frontend_test( - fn_tree="onnx.Acos", + fn_tree="onnx.acos", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), @@ -120,7 +120,7 @@ def test_onnx_acos_v2(dtype_x): @pytest.mark.skip("Testing pipeline not yet implemented") @handle_frontend_test( - fn_tree="onnx.Acosh", + fn_tree="onnx.acosh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), @@ -170,7 +170,7 @@ def test_onnx_acosh_v2(dtype_x): @pytest.mark.skip("Testing pipeline not yet implemented") @handle_frontend_test( - fn_tree="onnx.Add", + fn_tree="onnx.add", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, @@ -235,7 +235,7 @@ def test_onnx_add_v2(dtype_x): @pytest.mark.skip("Testing pipeline not yet implemented") @handle_frontend_test( - fn_tree="onnx.Asin", + fn_tree="onnx.asin", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_attribute.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_attribute.py index c23662391620c..f0a471073ab0e 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_attribute.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_attribute.py @@ -6,7 +6,7 @@ @handle_frontend_test( - fn_tree="paddle.imag", + fn_tree="paddle.tensor.attribute.imag", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), @@ -33,7 +33,7 @@ def test_paddle_imag( @handle_frontend_test( - fn_tree="paddle.is_complex", + fn_tree="paddle.tensor.attribute.is_complex", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), @@ -60,7 +60,7 @@ def test_paddle_is_complex( @handle_frontend_test( - fn_tree="paddle.is_floating_point", + fn_tree="paddle.tensor.attribute.is_floating_point", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), @@ -87,7 +87,7 @@ def test_paddle_is_floating_point( @handle_frontend_test( - fn_tree="paddle.is_integer", + fn_tree="paddle.tensor.attribute.is_integer", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), @@ -114,7 +114,7 @@ def test_paddle_is_integer( @handle_frontend_test( - fn_tree="paddle.rank", + fn_tree="paddle.tensor.attribute.rank", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), @@ -141,7 +141,7 @@ def test_paddle_rank( @handle_frontend_test( - fn_tree="paddle.real", + fn_tree="paddle.tensor.attribute.real", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py index ceff4a4f94323..6f3118326c4d9 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py @@ -7,12 +7,7 @@ import ivy_tests.test_ivy.helpers as helpers from 
ivy_tests.test_ivy.helpers import assert_all_close from ivy_tests.test_ivy.helpers import handle_frontend_test, matrix_is_stable -from ivy_tests.test_ivy.test_functional.test_core.test_linalg import ( - _get_dtype_and_matrix, -) - from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_linalg import ( - _get_first_matrix, _get_second_matrix, _get_cholesky_matrix, ) @@ -288,6 +283,7 @@ def test_paddle_bincount( min_value=-10, max_value=10, ), + aliases=["paddle.tensor.linalg.bmm"], test_with_out=st.just(False), ) def test_paddle_bmm( @@ -314,7 +310,7 @@ def test_paddle_bmm( # cholesky @handle_frontend_test( - fn_tree="paddle.cholesky", + fn_tree="paddle.tensor.linalg.cholesky", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=0, @@ -349,7 +345,7 @@ def test_paddle_cholesky( @handle_frontend_test( - fn_tree="paddle.cholesky_solve", + fn_tree="paddle.tensor.linalg.cholesky_solve", x=_get_second_matrix(), y=_get_paddle_cholesky_matrix(), test_with_out=st.just(False), @@ -382,7 +378,7 @@ def test_paddle_cholesky_solve( @handle_frontend_test( - fn_tree="paddle.cond", + fn_tree="paddle.tensor.linalg.cond", dtype_and_x=_get_dtype_and_matrix_non_singular(dtypes=["float32", "float64"]), p=st.sampled_from([None, "fro", "nuc", np.inf, -np.inf, 1, -1, 2, -2]), test_with_out=st.just(False), @@ -415,7 +411,7 @@ def test_paddle_cond( # cross @handle_frontend_test( - fn_tree="paddle.cross", + fn_tree="paddle.tensor.linalg.cross", dtype_x_y_axis=dtype_value1_value2_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, @@ -489,7 +485,7 @@ def test_paddle_dist( # dot @handle_frontend_test( - fn_tree="paddle.dot", + fn_tree="paddle.tensor.linalg.dot", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, @@ -523,7 +519,7 @@ def test_paddle_dot( # eig @handle_frontend_test( - fn_tree="paddle.eig", + fn_tree="paddle.tensor.linalg.eig", dtype_and_input=_get_dtype_and_square_matrix(real_and_complex_only=True), test_with_out=st.just(False), ) @@ -570,7 +566,7 @@ def test_paddle_eig( # eigh @handle_frontend_test( - fn_tree="paddle.eigh", + fn_tree="paddle.tensor.linalg.eigh", dtype_and_input=_get_dtype_and_square_matrix(real_and_complex_only=True), UPLO=st.sampled_from(("L", "U")), test_with_out=st.just(False), @@ -620,7 +616,7 @@ def test_paddle_eigh( # eigvals @handle_frontend_test( - fn_tree="paddle.eigvals", + fn_tree="paddle.tensor.linalg.eigvals", dtype_x=_get_dtype_and_square_matrix(real_and_complex_only=True), test_with_out=st.just(False), ) @@ -652,7 +648,7 @@ def test_paddle_eigvals( # eigvalsh @handle_frontend_test( - fn_tree="paddle.eigvalsh", + fn_tree="paddle.tensor.linalg.eigvalsh", dtype_x=_get_dtype_and_square_matrix(real_and_complex_only=True), UPLO=st.sampled_from(("L", "U")), test_with_out=st.just(False), @@ -696,6 +692,7 @@ def test_paddle_eigvalsh( min_value=-10, max_value=10, ), + aliases=["paddle.tensor.linalg.matmul"], transpose_x=st.booleans(), transpose_y=st.booleans(), test_with_out=st.just(False), @@ -728,7 +725,7 @@ def test_paddle_matmul( # matrix_power @handle_frontend_test( - fn_tree="paddle.matrix_power", + fn_tree="paddle.tensor.linalg.matrix_power", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, @@ -762,7 +759,7 @@ def test_paddle_matrix_power( # norm @handle_frontend_test( - fn_tree="paddle.norm", + fn_tree="paddle.tensor.linalg.norm", dtype_values_axis=_dtype_values_axis(), keepdims=st.booleans(), test_with_out=st.just(False), 
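The hunks in this file all follow one pattern: each test's fn_tree is moved to the canonical "paddle.tensor.linalg.*" path, or, where the test keeps its existing fn_tree (bmm, matmul above), the other path is registered through the aliases keyword so both entry points are exercised. A minimal sketch of that pattern, reusing only machinery that appears elsewhere in this diff (handle_frontend_test, aliases, helpers.dtype_and_values, helpers.test_frontend_function); the test name test_paddle_alias_example is hypothetical, the fn_tree/alias pair mirrors the bmm hunk but the bmm test's actual fn_tree is outside the context shown, and shape constraints are elided for brevity:

import ivy_tests.test_ivy.helpers as helpers
from hypothesis import strategies as st
from ivy_tests.test_ivy.helpers import handle_frontend_test


@handle_frontend_test(
    fn_tree="paddle.bmm",                     # assumed primary path under test
    aliases=["paddle.tensor.linalg.bmm"],     # alias path exercised as well
    dtype_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("float"),
        num_arrays=2,
        shared_dtype=True,
        min_value=-10,
        max_value=10,
    ),
    test_with_out=st.just(False),
)
def test_paddle_alias_example(
    *, dtype_x, frontend, backend_fw, test_flags, fn_tree, on_device
):
    # Unpack the drawn dtypes/arrays and run the shared frontend test
    # harness; the harness resolves fn_tree plus every alias path.
    input_dtype, x = dtype_x
    helpers.test_frontend_function(
        input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        frontend=frontend,
        test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
        x=x[0],
        y=x[1],
    )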
@@ -795,7 +792,7 @@ def test_paddle_norm( # pinv @handle_frontend_test( - fn_tree="paddle.pinv", + fn_tree="paddle.tensor.linalg.pinv", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, @@ -838,67 +835,38 @@ def test_paddle_pinv( ) -# qr -@handle_frontend_test( - fn_tree="paddle.qr", - dtype_and_x=_get_dtype_and_matrix(), - mode=st.sampled_from(("reduced", "complete")), - test_with_out=st.just(False), -) -def test_paddle_qr( - dtype_and_x, - mode, - frontend, - test_flags, - fn_tree, - backend_fw, - on_device, -): - dtype, x = dtype_and_x - assume(matrix_is_stable(x[0])) - helpers.test_frontend_function( - input_dtypes=dtype, - frontend=frontend, - test_flags=test_flags, - backend_to_test=backend_fw, - fn_tree=fn_tree, - on_device=on_device, - rtol=1e-01, - x=x[0], - mode=mode, - ) - - # solve @handle_frontend_test( fn_tree="paddle.solve", - x=_get_first_matrix(), - y=_get_second_matrix(), + dtype_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + shared_dtype=True, + min_value=-10, + max_value=10, + ), + aliases=["paddle.tensor.linalg.solve"], test_with_out=st.just(False), ) def test_paddle_solve( *, - x, - y, + dtype_x, frontend, - backend_fw, test_flags, + backend_fw, fn_tree, on_device, ): - input_dtype1, x1 = x - input_dtype2, x2 = y + input_dtype, x = dtype_x helpers.test_frontend_function( - input_dtypes=[input_dtype1, input_dtype2], - backend_to_test=backend_fw, + input_dtypes=input_dtype, frontend=frontend, + backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - rtol=1e-3, - atol=1e-3, - x=x1, - y=x2, + x=x[0], + y=x[1], ) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_activation.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_activation.py index a1b17a2d3dbf2..dca9cef8c70a9 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_activation.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_activation.py @@ -455,31 +455,6 @@ def test_paddle_prelu( ) -# relu -@handle_frontend_test( - fn_tree="paddle.nn.functional.relu", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), -) -def test_paddle_relu( - dtype_and_x, - frontend, - test_flags, - backend_fw, - fn_tree, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=input_dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - x=x[0], - ) - - # relu6 @handle_frontend_test( fn_tree="paddle.nn.functional.relu6", diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_common.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_common.py index 14dedc3b6db4a..d6b095a9536ee 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_common.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_common.py @@ -6,7 +6,7 @@ import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test from ivy_tests.test_ivy.test_frontends.test_torch.test_nn.test_functional.test_linear_functions import ( # noqa: E501 - _x_and_linear, + x_and_linear, ) @@ -220,7 +220,7 @@ def paddle_unfold_handler(draw, dtype): # linear @handle_frontend_test( fn_tree="paddle.nn.functional.common.linear", - dtype_x_weight_bias=_x_and_linear( + 
dtype_x_weight_bias=x_and_linear( dtypes=helpers.get_dtypes("valid", full=False), ), ) @@ -303,7 +303,9 @@ def test_paddle_cosine_similarity( p=st.floats(min_value=0.0, max_value=1.0), axis=st.integers(min_value=0, max_value=1), training=st.booleans(), - mode=st.sampled_from(["upscale_in_train", "downscale_in_infer"]), + mode=st.one_of( + *[st.just(seq) for seq in ["upscale_in_train", "downscale_in_infer"]] + ), ) def test_paddle_dropout( *, diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_conv.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_conv.py index 0ceea31a7e7ef..8d08fa2f37c85 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_conv.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_conv.py @@ -5,7 +5,7 @@ import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test from ivy_tests.test_ivy.test_frontends.test_torch.test_nn.test_functional.test_convolution_functions import ( # noqa: E501 - _x_and_filters, + x_and_filters, _output_shape, ) from ivy_tests.test_ivy.test_functional.test_nn.test_layers import ( @@ -16,7 +16,7 @@ # conv1d @handle_frontend_test( fn_tree="paddle.nn.functional.conv1d", - dtype_vals=_x_and_filters(dim=1), + dtype_vals=x_and_filters(dim=1), ) def test_paddle_conv1d( *, @@ -48,7 +48,7 @@ def test_paddle_conv1d( # conv1d_transpose @handle_frontend_test( fn_tree="paddle.nn.functional.conv1d_transpose", - dtype_vals=_x_and_filters(dim=1, transpose=True), + dtype_vals=x_and_filters(dim=1, transpose=True), ) def test_paddle_conv1d_tranpose( *, @@ -90,7 +90,7 @@ def test_paddle_conv1d_tranpose( # conv2d @handle_frontend_test( fn_tree="paddle.nn.functional.conv2d", - dtype_vals=_x_and_filters(dim=2), + dtype_vals=x_and_filters(dim=2), ) def test_paddle_conv2d( *, @@ -122,7 +122,7 @@ def test_paddle_conv2d( # conv2d_transpose @handle_frontend_test( fn_tree="paddle.nn.functional.conv2d_transpose", - dtype_vals=_x_and_filters(dim=2, transpose=True), + dtype_vals=x_and_filters(dim=2, transpose=True), ) def test_paddle_conv2d_tranpose( *, @@ -164,7 +164,7 @@ def test_paddle_conv2d_tranpose( # conv3d @handle_frontend_test( fn_tree="paddle.nn.functional.conv3d", - dtype_vals=_x_and_filters(dim=3), + dtype_vals=x_and_filters(dim=3), ) def test_paddle_conv3d( *, @@ -198,7 +198,7 @@ def test_paddle_conv3d( # conv3d_transpose @handle_frontend_test( fn_tree="paddle.nn.functional.conv3d_transpose", - dtype_vals=_x_and_filters(dim=3, transpose=True), + dtype_vals=x_and_filters(dim=3, transpose=True), ) def test_paddle_conv3d_tranpose( *, diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_distance.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_distance.py index e69de29bb2d1d..8b137891791fe 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_distance.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_distance.py @@ -0,0 +1 @@ + diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_extension.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_extension.py index e69de29bb2d1d..8b137891791fe 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_extension.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_extension.py @@ -0,0 +1 @@ + diff --git 
a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_input.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_input.py index e69de29bb2d1d..8b137891791fe 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_input.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_input.py @@ -0,0 +1 @@ + diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_loss.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_loss.py index c6792a06158c3..7883b88d30559 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_loss.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_loss.py @@ -468,81 +468,6 @@ def test_paddle_nll_loss( ) -@handle_frontend_test( - fn_tree="paddle.nn.functional.sigmoid_focal_loss", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=1, - shared_dtype=False, - min_num_dims=1, - min_dim_size=1, - ), - dtype_and_normalizer=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=1, - shared_dtype=True, - min_num_dims=1, - min_dim_size=1, - max_num_dims=1, - max_dim_size=1, - ), - dtype_and_labels=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=1, - shared_dtype=False, - min_num_dims=1, - min_dim_size=1, - min_value=0, - max_value=1, - ), - alpha=st.floats( - min_value=0.0, - max_value=1.0, - ), - gamma=st.floats( - min_value=0.0, - max_value=5.0, - ), - reduction=st.sampled_from(["mean", "sum", "none"]), -) -def test_paddle_sigmoid_focal_loss( - dtype_and_x, - dtype_and_normalizer, - dtype_and_labels, - alpha, - gamma, - reduction, - on_device, - fn_tree, - frontend, - test_flags, - backend_fw, -): - x_dtype, x = dtype_and_x - normalizer_dtype, normalizer = dtype_and_normalizer - label_dtype, labels = dtype_and_labels - normalizer = [norm.reshape(-1) for norm in normalizer] - labels = ivy.array(labels, dtype=ivy.int64) - helpers.test_frontend_function( - input_dtypes=[ivy.int64] - + [ivy.float64] - + x_dtype - + normalizer_dtype - + label_dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - logit=x[0], - label=labels[0], - alpha=alpha, - gamma=gamma, - normalizer=normalizer[0], - reduction=reduction, - ) - - # smooth_l1_loss @handle_frontend_test( fn_tree="paddle.nn.functional.smooth_l1_loss", @@ -586,93 +511,6 @@ def test_paddle_smooth_l1_loss( ) -@handle_frontend_test( - fn_tree="paddle.nn.functional.softmax_with_cross_entropy", - dtype_and_x_and_axis=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - min_value=1e-04, - max_value=1, - min_num_dims=2, - allow_inf=False, - shared_dtype=True, - force_int_axis=True, - valid_axis=True, - ), - soft_label=st.booleans(), - numeric_stable_mode=st.booleans(), - return_softmax=st.booleans(), -) -def test_paddle_softmax_with_cross_entropy( - dtype_and_x_and_axis, - soft_label, - numeric_stable_mode, - return_softmax, - on_device, - fn_tree, - backend_fw, - frontend, - test_flags, -): - x_dtype, x, axis = dtype_and_x_and_axis - logits = x[0] - labels = x[1] - label_dtype = x_dtype - ignore_index = 0 - if soft_label: - labels = labels / ivy.sum(labels).to_native() - else: - labels = ivy.argmax(labels, axis=axis).to_native() - flattened_labels = labels.flatten() - ignore_index = 
ivy.randint(0, flattened_labels.size) - ignore_index = flattened_labels[ignore_index] - label_dtype = [str(labels.dtype)] - if on_device == "cpu" or soft_label: - numeric_stable_mode = True - helpers.test_frontend_function( - input_dtypes=[x_dtype[0], label_dtype[0]], - frontend=frontend, - backend_to_test=backend_fw, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - logits=logits, - label=labels, - soft_label=soft_label, - ignore_index=ignore_index, - numeric_stable_mode=numeric_stable_mode, - return_softmax=return_softmax, - axis=axis, - ) - - -@handle_frontend_test( - fn_tree="paddle.nn.functional.loss.square_error_cost", - dtype_and_input_and_label=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), num_arrays=2 - ), -) -def test_paddle_square_error_cost( - *, - dtype_and_input_and_label, - frontend, - test_flags, - fn_tree, - backend_fw, -): - input_dtypes, input_and_label = dtype_and_input_and_label - input, label = input_and_label - helpers.test_frontend_function( - input_dtypes=input_dtypes, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - input=input, - label=label, - fn_tree=fn_tree, - ) - - @handle_frontend_test( fn_tree="paddle.nn.functional.triplet_margin_loss", dtype_and_inputs=helpers.dtype_and_values( diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_search.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_search.py index 544ef6163b272..abf9ba56fb1fe 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_search.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_search.py @@ -11,25 +11,6 @@ # --------------- # -# test_where -@st.composite -def _broadcastable_trio(draw): - shape = draw(helpers.get_shape(min_num_dims=1, min_dim_size=1)) - cond = draw(helpers.array_values(dtype="bool", shape=shape)) - dtypes, xs = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("numeric"), - num_arrays=2, - shape=shape, - shared_dtype=True, - large_abs_safety_factor=16, - small_abs_safety_factor=16, - safety_factor_scale="log", - ) - ) - return cond, xs, dtypes - - # masked_select @st.composite def _dtypes_input_mask(draw): @@ -324,25 +305,3 @@ def test_paddle_topk( sorted=sorted, test_values=False, ) - - -@handle_frontend_test( - fn_tree="paddle.where", - broadcastables=_broadcastable_trio(), -) -def test_paddle_where( - *, broadcastables, test_flags, frontend, backend_fw, fn_tree, on_device -): - cond, xs, dtypes = broadcastables - - helpers.test_frontend_function( - input_dtypes=["bool"] + dtypes, - test_flags=test_flags, - frontend=frontend, - backend_to_test=backend_fw, - fn_tree=fn_tree, - on_device=on_device, - condition=cond, - x=xs[0], - y=xs[1], - ) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_signal.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_signal.py index e69de29bb2d1d..8b137891791fe 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_signal.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_signal.py @@ -0,0 +1 @@ + diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_einsum.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_einsum.py new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_einsum.py @@ -0,0 +1 @@ + diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_manipulation.py 
b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_manipulation.py index 74fe6f3d61248..be36b2a116f1c 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_manipulation.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_manipulation.py @@ -1,5 +1,6 @@ # global from hypothesis import strategies as st +import math # local import ivy_tests.test_ivy.helpers as helpers @@ -12,6 +13,227 @@ # --- Helpers --- # # --------------- # + +# stack +@st.composite +def _arrays_axis_n_dtypes(draw): + num_dims = draw(st.shared(helpers.ints(min_value=2, max_value=5), key="num_dims")) + num_arrays = draw( + st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays") + ) + common_shape = draw( + helpers.list_of_size( + x=helpers.ints(min_value=2, max_value=3), + size=num_dims - 1, + ) + ) + axis = draw(st.sampled_from(list(range(num_dims)))) + xs = [] + input_dtypes = draw( + helpers.array_dtypes(available_dtypes=draw(helpers.get_dtypes("numeric"))) + ) + dtype = draw(st.sampled_from(input_dtypes)) + for _ in range(num_arrays): + x = draw( + helpers.array_values( + shape=common_shape, + dtype=dtype, + ) + ) + xs.append(x) + input_dtypes = [dtype] * len(input_dtypes) + return xs, input_dtypes, axis + + +# concat +@st.composite +def _arrays_idx_n_dtypes(draw): + num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims")) + num_arrays = draw( + st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays") + ) + common_shape = draw( + helpers.list_of_size( + x=helpers.ints(min_value=2, max_value=3), + size=num_dims - 1, + ) + ) + unique_idx = draw(helpers.ints(min_value=0, max_value=num_dims - 1)) + unique_dims = draw( + helpers.list_of_size( + x=helpers.ints(min_value=2, max_value=3), + size=num_arrays, + ) + ) + xs = [] + input_dtypes = draw( + helpers.array_dtypes(available_dtypes=draw(helpers.get_dtypes("valid"))) + ) + dtype = draw(st.sampled_from(input_dtypes)) + for ud in unique_dims: + x = draw( + helpers.array_values( + shape=common_shape[:unique_idx] + [ud] + common_shape[unique_idx:], + dtype=dtype, + ) + ) + xs.append(x) + input_dtypes = [dtype] * len(input_dtypes) + return xs, input_dtypes, unique_idx + + +@st.composite +def _broadcast_to_helper(draw): + dtype_and_x = draw( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=1, + max_num_dims=6, + ) + ) + + dtype, x = dtype_and_x + input_shape = x[0].shape + + max_num_dims = 6 - len(input_shape) + shape = draw(helpers.get_shape(max_num_dims=max_num_dims)) + input_shape + + return dtype, x, shape + + +# flip +@st.composite +def _dtype_x_axis(draw, **kwargs): + dtype, x, shape = draw(helpers.dtype_and_values(**kwargs, ret_shape=True)) + axis = draw( + st.lists( + helpers.ints(min_value=0, max_value=len(shape) - 1), + min_size=len(shape), + max_size=len(shape), + unique=True, + ) + ) + return dtype, x, axis + + +# expand +@st.composite +def _expand_helper(draw): + dtype_and_x = draw( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=1, + max_num_dims=6, + ) + ) + + dtype, x = dtype_and_x + input_shape = x[0].shape + + max_num_dims = 6 - len(input_shape) + shape = draw(helpers.get_shape(max_num_dims=max_num_dims)) + input_shape + + return dtype, x, shape + + +@st.composite +def _gather_helper(draw): + dtype_and_param = draw( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=1, + max_num_dims=6, + ) + ) + + dtype_and_indices = draw( + 
helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=1, + max_num_dims=6, + ) + ) + dtype, param = dtype_and_param + dtype, indices = dtype_and_indices + return dtype, param, indices + + +# split +@st.composite +def _split_helper(draw): + dtypes, values, shape = draw( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=2, + max_num_dims=4, + min_dim_size=2, + max_dim_size=4, + ret_shape=True, + ) + ) + axis = draw(st.sampled_from(range(len(shape)))) + num_eles = shape[axis] + splits = [i for i in range(1, num_eles + 1) if num_eles % i == 0] + num_splits = draw(st.sampled_from(splits)) + return dtypes, values, num_splits, axis + + +# squeeze +@st.composite +def _squeeze_helper(draw): + shape = draw(st.shared(helpers.get_shape(), key="value_shape")) + valid_axes = [] + for index, axis in enumerate(shape): + if axis == 1: + valid_axes.append(index) + valid_axes.insert(0, None) + + return draw(st.sampled_from(valid_axes)) + + +# tile +@st.composite +def _tile_helper(draw): + dtype, x, shape = draw( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=1, + max_num_dims=4, + min_dim_size=2, + max_dim_size=3, + ret_shape=True, + ) + ) + repeats = draw( + helpers.list_of_size( + x=helpers.ints(min_value=1, max_value=3), + size=len(shape), + ) + ) + return dtype, x, repeats + + +# Helpers # +# ------ # + + +@st.composite +def dtypes_x_reshape(draw): + shape = draw(helpers.get_shape(min_num_dims=1)) + dtypes, x = draw( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("numeric"), + shape=shape, + ) + ) + shape = draw( + helpers.get_shape(min_num_dims=1).filter( + lambda s: math.prod(s) == math.prod(shape) + ) + ) + return dtypes, x, shape + + @st.composite def dtypes_x_reshape_(draw): shape = draw(helpers.get_shape(min_num_dims=1)) @@ -27,9 +249,236 @@ def dtypes_x_reshape_(draw): # --- Main --- # # ------------ # + +# abs +@handle_frontend_test( + fn_tree="paddle.abs", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + ), +) +def test_paddle_abs( + *, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + ) + + +@handle_frontend_test( + fn_tree="paddle.broadcast_to", + dtype_x_and_shape=_broadcast_to_helper(), +) +def test_paddle_broadcast_to( + *, + dtype_x_and_shape, + on_device, + fn_tree, + backend_fw, + frontend, + test_flags, +): + input_dtype, x, shape = dtype_x_and_shape + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + shape=shape, + ) + + +# cast +@handle_frontend_test( + fn_tree="paddle.cast", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + ), + dtype=helpers.get_dtypes("valid", full=False), +) +def test_paddle_cast( + *, + dtype_and_x, + dtype, + on_device, + backend_fw, + fn_tree, + frontend, + test_flags, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + dtype=dtype[0], + ) + + +@handle_frontend_test( + 
fn_tree="paddle.concat", + xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(), + test_with_out=st.just(False), +) +def test_paddle_concat( + *, + xs_n_input_dtypes_n_unique_idx, + on_device, + fn_tree, + frontend, + backend_fw, + test_flags, +): + xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx + helpers.test_frontend_function( + input_dtypes=input_dtypes, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=xs, + axis=unique_idx, + ) + + +@handle_frontend_test( + fn_tree="paddle.expand", + dtype_x_and_shape=_expand_helper(), +) +def test_paddle_expand( + *, + dtype_x_and_shape, + on_device, + fn_tree, + backend_fw, + frontend, + test_flags, +): + input_dtype, x, shape = dtype_x_and_shape + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + shape=shape, + ) + + +@handle_frontend_test( + fn_tree="paddle.flip", + dtype_x_axis=_dtype_x_axis( + available_dtypes=helpers.get_dtypes("numeric"), + min_num_dims=1, + min_dim_size=1, + ), + test_with_out=st.just(False), +) +def test_paddle_flip( + *, + dtype_x_axis, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + input_dtype, x, axis = dtype_x_axis + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + axis=axis, + ) + + +@handle_frontend_test( + fn_tree="paddle.gather", + dtype_param_and_indices=_gather_helper(), +) +def test_paddle_gather( + *, + dtype_param_and_indices, + on_device, + fn_tree, + frontend, + backend_fw, + test_flags, +): + input_dtype, param, indices = dtype_param_and_indices + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + param=param[0], + indices=indices[0], + ) + + +# Tests # +# ----- # + + +# reshape +@handle_frontend_test( + fn_tree="paddle.reshape", + dtypes_x_reshape=dtypes_x_reshape(), +) +def test_paddle_reshape( + *, + dtypes_x_reshape, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + input_dtype, x, shape = dtypes_x_reshape + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + shape=shape, + ) + + # reshape_ @handle_frontend_test( - fn_tree="paddle.tensor.manipulation.reshape_", + fn_tree="paddle.reshape_", dtypes_x_reshape=dtypes_x_reshape_(), ) def test_paddle_reshape_( @@ -52,3 +501,260 @@ def test_paddle_reshape_( x=x[0], shape=shape, ) + + +# roll +@handle_frontend_test( + fn_tree="paddle.roll", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("numeric"), + min_num_dims=2, + min_dim_size=2, + ), + shift=helpers.ints(min_value=1, max_value=10), + axis=helpers.ints(min_value=-1, max_value=1), + test_with_out=st.just(False), +) +def test_paddle_roll( + *, + dtype_and_x, + shift, + axis, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + shifts=shift, + axis=axis, + ) + + +# rot90 
+@handle_frontend_test( + fn_tree="paddle.rot90", + dtype_m_k_axes=_get_dtype_values_k_axes_for_rot90( + available_dtypes=helpers.get_dtypes(kind="valid"), + min_num_dims=1, + max_num_dims=5, + min_dim_size=1, + max_dim_size=10, + ), +) +def test_paddle_rot90( + *, + dtype_m_k_axes, + on_device, + fn_tree, + frontend, + backend_fw, + test_flags, +): + input_dtype, m, k, axes = dtype_m_k_axes + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=m, + k=k, + axes=tuple(axes), + ) + + +@handle_frontend_test( + fn_tree="paddle.split", + dt_x_num_splits_axis=_split_helper(), + test_with_out=st.just(False), +) +def test_paddle_split( + *, + dt_x_num_splits_axis, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + input_dtypes, x, num_splits, axis = dt_x_num_splits_axis + helpers.test_frontend_function( + input_dtypes=input_dtypes, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + num_or_sections=num_splits, + axis=axis, + ) + + +@handle_frontend_test( + fn_tree="paddle.squeeze", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + shape=st.shared(helpers.get_shape(), key="value_shape"), + ), + axis=_squeeze_helper(), +) +def test_paddle_squeeze( + *, + dtype_and_x, + axis, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + axis=axis, + ) + + +@handle_frontend_test( + fn_tree="paddle.stack", + _arrays_n_dtypes_axis=_arrays_axis_n_dtypes(), + test_with_out=st.just(False), +) +def test_paddle_stack( + *, + _arrays_n_dtypes_axis, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + xs, input_dtypes, axis = _arrays_n_dtypes_axis + helpers.test_frontend_function( + input_dtypes=input_dtypes, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=xs, + axis=axis, + ) + + +# take_along_axis +@handle_frontend_test( + fn_tree="paddle.take_along_axis", + dtype_indices_axis=helpers.array_indices_axis( + array_dtypes=helpers.get_dtypes(kind="valid"), + indices_dtypes=["int64"], + min_num_dims=1, + max_num_dims=5, + min_dim_size=1, + max_dim_size=10, + indices_same_dims=True, + ), +) +def test_paddle_take_along_axis( + *, + dtype_indices_axis, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + input_dtypes, value, indices, axis, _ = dtype_indices_axis + helpers.test_frontend_function( + input_dtypes=input_dtypes, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + arr=value, + indices=indices, + axis=axis, + ) + + +@handle_frontend_test( + fn_tree="paddle.tile", + dt_x_repeats=_tile_helper(), + test_with_out=st.just(False), +) +def test_paddle_tile( + *, + dt_x_repeats, + on_device, + fn_tree, + frontend, + backend_fw, + test_flags, +): + input_dtypes, x, repeats = dt_x_repeats + helpers.test_frontend_function( + input_dtypes=input_dtypes, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + repeat_times=repeats, + ) + + +# unstack +@handle_frontend_test( + 
fn_tree="paddle.unstack", + dtypes_values=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("numeric"), + min_num_dims=2, + max_num_dims=2, + max_dim_size=1, + ), + number_positional_args=st.just(1), + axis=st.integers(-1, 0), + test_with_out=st.just(False), +) +def test_paddle_unstack( + *, + dtypes_values, + axis, + on_device, + fn_tree, + backend_fw, + frontend, + test_flags, +): + x_dtype, x = dtypes_values + axis = axis + helpers.test_frontend_function( + input_dtypes=x_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + axis=axis, + ) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_math.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_math.py index 6ed321898c071..62ab9db541e88 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_math.py @@ -10,42 +10,37 @@ ) -# sin -@handle_frontend_test( - fn_tree="paddle.sin", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - ), -) -def test_paddle_sin( - *, - dtype_and_x, - on_device, - fn_tree, - frontend, - test_flags, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=input_dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - x=x[0], +# --- Helpers --- # +# --------------- # + + +@st.composite +def _test_paddle_take_helper(draw): + mode = draw(st.sampled_from(["raise", "clip", "wrap"])) + + safe_bounds = mode == "raise" + + dtypes, xs, indices, _, _ = draw( + helpers.array_indices_axis( + array_dtypes=helpers.get_dtypes("float_and_integer"), + indices_dtypes=["int32", "int64"], + valid_bounds=safe_bounds, + ) ) + return dtypes, xs, indices, mode + -# cos +# --- Main --- # +# ------------ # + + +# abs @handle_frontend_test( - fn_tree="paddle.cos", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - ), + fn_tree="paddle.tensor.math.abs", + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) -def test_paddle_cos( +def test_paddle_abs( *, dtype_and_x, on_device, @@ -95,14 +90,14 @@ def test_paddle_acos( ) -# cosh +# acosh @handle_frontend_test( - fn_tree="paddle.tensor.math.cosh", + fn_tree="paddle.tensor.math.acosh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_cosh( +def test_paddle_acosh( *, dtype_and_x, on_device, @@ -124,15 +119,20 @@ def test_paddle_cosh( ) -# tanh +# add @handle_frontend_test( - fn_tree="paddle.tensor.math.tanh", - aliases=["paddle.tanh", "paddle.nn.functional.tanh"], + fn_tree="paddle.add", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), + num_arrays=2, + allow_inf=False, + large_abs_safety_factor=2, + small_abs_safety_factor=2, + safety_factor_scale="log", + shared_dtype=True, ), ) -def test_paddle_tanh( +def test_paddle_add( *, dtype_and_x, on_device, @@ -146,31 +146,45 @@ def test_paddle_tanh( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, - test_flags=test_flags, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, - atol=1e-2, x=x[0], + y=x[1], ) -# acosh +# addmm @handle_frontend_test( - fn_tree="paddle.tensor.math.acosh", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + 
fn_tree="paddle.tensor.math.addmm", + dtype_input_xy=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True), + beta=st.floats( + min_value=-5, + max_value=5, + allow_nan=False, + allow_subnormal=False, + allow_infinity=False, + ), + alpha=st.floats( + min_value=-5, + max_value=5, + allow_nan=False, + allow_subnormal=False, + allow_infinity=False, ), ) -def test_paddle_acosh( +def test_paddle_addmm( *, - dtype_and_x, + dtype_input_xy, + beta, + alpha, on_device, fn_tree, frontend, test_flags, backend_fw, ): - input_dtype, x = dtype_and_x + input_dtype, input, x, y = dtype_input_xy helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, @@ -178,116 +192,117 @@ def test_paddle_acosh( test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - atol=1e-2, + input=input[0], x=x[0], + y=y[0], + beta=beta, + alpha=alpha, ) -# asin +# amax @handle_frontend_test( - fn_tree="paddle.tensor.math.asin", + fn_tree="paddle.tensor.math.amax", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), + num_arrays=2, + allow_inf=False, + shared_dtype=True, ), ) -def test_paddle_asin( +def test_paddle_amax( *, dtype_and_x, - frontend, - test_flags, + on_device, fn_tree, backend_fw, - on_device, + frontend, + test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, - test_flags=test_flags, + backend_to_test=backend_fw, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, x=x[0], ) -# log +# amin @handle_frontend_test( - fn_tree="paddle.log", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + fn_tree="paddle.tensor.math.amin", + dtype_and_x=helpers.dtype_values_axis( + available_dtypes=helpers.get_dtypes("valid"), + valid_axis=True, ), + keepdim=st.booleans(), ) -def test_paddle_log( +def test_paddle_amin( *, dtype_and_x, + keepdim, on_device, fn_tree, + backend_fw, frontend, test_flags, - backend_fw, ): - input_dtype, x = dtype_and_x + input_dtype, x, axis = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, - test_flags=test_flags, + backend_to_test=backend_fw, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, x=x[0], + axis=axis, + keepdim=keepdim, ) -# divide @handle_frontend_test( - fn_tree="paddle.divide", + fn_tree="paddle.tensor.math.angle", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - allow_inf=False, - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - shared_dtype=True, + available_dtypes=["float64", "complex64", "complex128"], ), ) -def test_paddle_divide( +def test_paddle_angle( *, dtype_and_x, on_device, fn_tree, frontend, - test_flags, backend_fw, + test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, + backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], - y=x[1], ) -# multiply +# any @handle_frontend_test( - fn_tree="paddle.multiply", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - allow_inf=False, - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - shared_dtype=True, + fn_tree="paddle.tensor.math.any", + dtype_and_x=helpers.dtype_values_axis( + available_dtypes=["bool"], + 
valid_axis=True, + allow_neg_axes=True, + force_int_axis=True, + min_num_dims=1, ), ) -def test_paddle_multiply( +def test_paddle_any( *, dtype_and_x, on_device, @@ -296,97 +311,85 @@ def test_paddle_multiply( test_flags, backend_fw, ): - input_dtype, x = dtype_and_x + input_dtype, x, axis = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, + backend_to_test=backend_fw, x=x[0], - y=x[1], + axis=axis, + keepdim=False, ) -# add +# asin @handle_frontend_test( - fn_tree="paddle.add", + fn_tree="paddle.tensor.math.asin", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), - num_arrays=2, - allow_inf=False, - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - shared_dtype=True, ), ) -def test_paddle_add( +def test_paddle_asin( *, dtype_and_x, - on_device, - fn_tree, frontend, test_flags, + fn_tree, backend_fw, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, - fn_tree=fn_tree, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, x=x[0], - y=x[1], ) -# subtract +# asinh @handle_frontend_test( - fn_tree="paddle.subtract", + fn_tree="paddle.tensor.math.asinh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - allow_inf=False, - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - shared_dtype=True, ), ) -def test_paddle_subtract( +def test_paddle_asinh( *, dtype_and_x, on_device, fn_tree, frontend, - test_flags, backend_fw, + test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, - fn_tree=fn_tree, + backend_to_test=backend_fw, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, + atol=1e-2, x=x[0], - y=x[1], ) -# sqrt +# atan @handle_frontend_test( - fn_tree="paddle.tensor.math.sqrt", + fn_tree="paddle.tensor.math.atan", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_sqrt( +def test_paddle_atan( *, dtype_and_x, frontend, @@ -407,49 +410,56 @@ def test_paddle_sqrt( ) -# atanh +# atan2 @handle_frontend_test( - fn_tree="paddle.tensor.math.atanh", + fn_tree="paddle.atan2", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + allow_inf=False, + large_abs_safety_factor=2, + small_abs_safety_factor=2, + safety_factor_scale="log", + shared_dtype=True, ), ) -def test_paddle_atanh( +def test_paddle_atan2( *, dtype_and_x, on_device, fn_tree, frontend, - test_flags, backend_fw, + test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, - test_flags=test_flags, + backend_to_test=backend_fw, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, x=x[0], + y=x[1], ) -# atan +# atanh @handle_frontend_test( - fn_tree="paddle.tensor.math.atan", + fn_tree="paddle.tensor.math.atanh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_atan( +def test_paddle_atanh( *, dtype_and_x, + on_device, + fn_tree, frontend, test_flags, - fn_tree, backend_fw, - on_device, ): input_dtype, x = dtype_and_x 
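+ # test_frontend_function runs the frontend op on the test backend and compares the result against native paddle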
helpers.test_frontend_function( @@ -463,15 +473,14 @@ def test_paddle_atan( ) -# round +# ceil @handle_frontend_test( - fn_tree="paddle.tensor.math.round", + fn_tree="paddle.tensor.math.ceil", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), - min_value=1, ), ) -def test_paddle_round( +def test_paddle_ceil( *, dtype_and_x, frontend, @@ -492,24 +501,23 @@ def test_paddle_round( ) -# round_ +# conj @handle_frontend_test( - fn_tree="paddle.tensor.math.round_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - min_value=1, + fn_tree="paddle.tensor.math.conj", + dtype_and_input=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("numeric"), ), ) -def test_paddle_round_( +def test_paddle_conj( *, - dtype_and_x, + dtype_and_input, frontend, + backend_fw, test_flags, fn_tree, - backend_fw, on_device, ): - input_dtype, x = dtype_and_x + input_dtype, x = dtype_and_input helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, @@ -521,21 +529,21 @@ def test_paddle_round_( ) -# ceil +# cos @handle_frontend_test( - fn_tree="paddle.tensor.math.ceil", + fn_tree="paddle.cos", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_ceil( +def test_paddle_cos( *, dtype_and_x, + on_device, + fn_tree, frontend, test_flags, - fn_tree, backend_fw, - on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -549,14 +557,14 @@ def test_paddle_ceil( ) -# sinh +# cosh @handle_frontend_test( - fn_tree="paddle.sinh", + fn_tree="paddle.tensor.math.cosh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_sinh( +def test_paddle_cosh( *, dtype_and_x, on_device, @@ -573,89 +581,66 @@ def test_paddle_sinh( test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, + atol=1e-2, x=x[0], ) -# pow +# cumprod @handle_frontend_test( - fn_tree="paddle.pow", - dtype_and_x=helpers.dtype_and_values( + fn_tree="paddle.tensor.math.cumprod", + dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), - num_arrays=2, - allow_inf=False, - shared_dtype=True, + valid_axis=True, + force_int_axis=True, + min_num_dims=1, + min_value=-5, + max_value=5, ), ) -def test_paddle_pow( +def test_paddle_cumprod( *, - dtype_and_x, + dtype_x_axis, on_device, fn_tree, frontend, - test_flags, backend_fw, + test_flags, ): - input_dtype, x = dtype_and_x + input_dtype, x, axis = dtype_x_axis helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, + backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], - y=x[1], + dim=axis, ) -# abs +# deg2rad @handle_frontend_test( - fn_tree="paddle.tensor.math.abs", - dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), + fn_tree="paddle.deg2rad", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + ), ) -def test_paddle_abs( +def test_paddle_deg2rad( *, dtype_and_x, on_device, fn_tree, frontend, - test_flags, backend_fw, + test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - backend_to_test=backend_fw, frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - x=x[0], - ) - - -# conj -@handle_frontend_test( - fn_tree="paddle.tensor.math.conj", - dtype_and_input=helpers.dtype_and_values( - 
available_dtypes=helpers.get_dtypes("numeric"), - ), -) -def test_paddle_conj( - *, - dtype_and_input, - frontend, - backend_fw, - test_flags, - fn_tree, - on_device, -): - input_dtype, x = dtype_and_input - helpers.test_frontend_function( - input_dtypes=input_dtype, backend_to_test=backend_fw, - frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, @@ -663,37 +648,60 @@ def test_paddle_conj( ) -# floor +# diff @handle_frontend_test( - fn_tree="paddle.tensor.math.floor", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), + fn_tree="paddle.tensor.math.diff", + dtype_n_x_n_axis=helpers.dtype_values_axis( + available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"), + min_num_dims=1, + valid_axis=True, + force_int_axis=True, + ), + n=st.integers(min_value=1, max_value=1), + dtype_prepend=helpers.dtype_and_values( + available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"), + min_num_dims=1, + max_num_dims=1, + ), + dtype_append=helpers.dtype_and_values( + available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"), + min_num_dims=1, + max_num_dims=1, ), ) -def test_paddle_floor( +def test_paddle_diff( *, - dtype_and_x, + dtype_n_x_n_axis, + n, + dtype_prepend, + dtype_append, + test_flags, frontend, backend_fw, - test_flags, fn_tree, on_device, ): - input_dtype, x = dtype_and_x + input_dtype, x, axis = dtype_n_x_n_axis + _, prepend = dtype_prepend + _, append = dtype_append helpers.test_frontend_function( input_dtypes=input_dtype, + test_flags=test_flags, frontend=frontend, backend_to_test=backend_fw, - test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], + n=n, + axis=axis, + prepend=prepend[0], + append=append[0], ) -# remainder +# divide @handle_frontend_test( - fn_tree="paddle.remainder", + fn_tree="paddle.divide", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, @@ -704,20 +712,20 @@ def test_paddle_floor( shared_dtype=True, ), ) -def test_paddle_remainder( +def test_paddle_divide( *, dtype_and_x, on_device, fn_tree, frontend, - backend_fw, test_flags, + backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, + frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, @@ -726,21 +734,21 @@ def test_paddle_remainder( ) -# log2 +# erf @handle_frontend_test( - fn_tree="paddle.log2", + fn_tree="paddle.tensor.math.erf", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_log2( +def test_paddle_erf( *, dtype_and_x, - on_device, - fn_tree, frontend, backend_fw, test_flags, + fn_tree, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -754,15 +762,14 @@ def test_paddle_log2( ) -# log1p +# exp @handle_frontend_test( - fn_tree="paddle.log1p", + fn_tree="paddle.exp", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), - max_value=1e5, ), ) -def test_paddle_log1p( +def test_paddle_exp( *, dtype_and_x, on_device, @@ -783,14 +790,14 @@ def test_paddle_log1p( ) -# rad2deg +# expm1 @handle_frontend_test( - fn_tree="paddle.rad2deg", + fn_tree="paddle.expm1", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_rad2deg( +def test_paddle_expm1( *, dtype_and_x, on_device, @@ -811,21 +818,21 @@ def test_paddle_rad2deg( ) -# 
deg2rad +# floor @handle_frontend_test( - fn_tree="paddle.deg2rad", + fn_tree="paddle.tensor.math.floor", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_deg2rad( +def test_paddle_floor( *, dtype_and_x, - on_device, - fn_tree, frontend, backend_fw, test_flags, + fn_tree, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -839,23 +846,22 @@ def test_paddle_deg2rad( ) -# tan @handle_frontend_test( - fn_tree="paddle.tensor.math.tan", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + fn_tree="paddle.fmax", + dtypes_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True ), ) -def test_paddle_tan( +def test_paddle_fmax( *, - dtype_and_x, + dtypes_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): - input_dtype, x = dtype_and_x + input_dtype, x = dtypes_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, @@ -863,61 +869,57 @@ def test_paddle_tan( test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - atol=1e-2, x=x[0], + y=x[1], ) -# atan2 @handle_frontend_test( - fn_tree="paddle.atan2", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - allow_inf=False, - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - shared_dtype=True, + fn_tree="paddle.fmin", + dtypes_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True ), ) -def test_paddle_atan2( +def test_paddle_fmin( *, - dtype_and_x, + dtypes_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): - input_dtype, x = dtype_and_x + input_dtype, x = dtypes_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - fn_tree=fn_tree, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) -# sign +# frac @handle_frontend_test( - fn_tree="paddle.tensor.math.sign", + fn_tree="paddle.tensor.math.frac", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), + num_arrays=1, + max_value=1e6, + min_value=-1e6, ), ) -def test_paddle_sign( +def test_paddle_frac( *, dtype_and_x, - on_device, - fn_tree, frontend, backend_fw, test_flags, + fn_tree, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -931,14 +933,20 @@ def test_paddle_sign( ) -# neg +# gcd @handle_frontend_test( - fn_tree="paddle.neg", + fn_tree="paddle.gcd", dtype_and_x=helpers.dtype_and_values( - available_dtypes=["float32", "float64", "int8", "int16", "int32", "int64"], + available_dtypes=helpers.get_dtypes("valid"), + min_value=-100, + max_value=100, + min_num_dims=1, + min_dim_size=1, + num_arrays=2, + shared_dtype=True, ), ) -def test_paddle_neg( +def test_paddle_gcd( *, dtype_and_x, on_device, @@ -952,22 +960,28 @@ def test_paddle_neg( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - test_flags=test_flags, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, x=x[0], + y=x[1], ) -# lgamma +# heaviside @handle_frontend_test( - fn_tree="paddle.tensor.math.lgamma", + fn_tree="paddle.tensor.math.heaviside", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + allow_inf=False, + 
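# log-scale safety factors keep the drawn magnitudes away from overflow and underflow +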
large_abs_safety_factor=2, + small_abs_safety_factor=2, safety_factor_scale="log", + shared_dtype=True, ), ) -def test_paddle_lgamma( +def test_paddle_heaviside( *, dtype_and_x, on_device, @@ -981,29 +995,29 @@ def test_paddle_lgamma( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - test_flags=test_flags, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, - atol=1e-4, x=x[0], + y=x[1], ) -# exp +# isfinite @handle_frontend_test( - fn_tree="paddle.exp", + fn_tree="paddle.isfinite", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_exp( +def test_paddle_isfinite( *, dtype_and_x, - on_device, - fn_tree, frontend, backend_fw, test_flags, + fn_tree, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -1017,21 +1031,21 @@ def test_paddle_exp( ) -# expm1 +# isinf @handle_frontend_test( - fn_tree="paddle.expm1", + fn_tree="paddle.isinf", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_expm1( +def test_paddle_isinf( *, dtype_and_x, - on_device, - fn_tree, frontend, backend_fw, test_flags, + fn_tree, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -1045,21 +1059,21 @@ def test_paddle_expm1( ) -# square +# isnan @handle_frontend_test( - fn_tree="paddle.tensor.math.square", + fn_tree="paddle.isnan", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_square( +def test_paddle_isnan( *, dtype_and_x, - on_device, - fn_tree, frontend, backend_fw, test_flags, + fn_tree, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -1073,20 +1087,23 @@ def test_paddle_square( ) -# reciprocal +# kron @handle_frontend_test( - fn_tree="paddle.reciprocal", + fn_tree="paddle.tensor.math.kron", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + allow_inf=False, + shared_dtype=True, ), ) -def test_paddle_reciprocal( +def test_paddle_kron( *, dtype_and_x, on_device, fn_tree, - frontend, backend_fw, + frontend, test_flags, ): input_dtype, x = dtype_and_x @@ -1094,10 +1111,11 @@ def test_paddle_reciprocal( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - test_flags=test_flags, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, x=x[0], + y=x[1], ) @@ -1135,54 +1153,51 @@ def test_paddle_lcm( ) -# cumprod +# lerp @handle_frontend_test( - fn_tree="paddle.tensor.math.cumprod", - dtype_x_axis=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("valid"), - valid_axis=True, - force_int_axis=True, - min_num_dims=1, - min_value=-5, - max_value=5, + fn_tree="paddle.tensor.math.lerp", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + num_arrays=3, + allow_inf=False, + large_abs_safety_factor=2, + small_abs_safety_factor=2, + safety_factor_scale="log", + shared_dtype=True, ), ) -def test_paddle_cumprod( +def test_paddle_lerp( *, - dtype_x_axis, + dtype_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): - input_dtype, x, axis = dtype_x_axis + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - test_flags=test_flags, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, x=x[0], - dim=axis, + y=x[1], + weight=x[2], ) -# gcd +# lgamma @handle_frontend_test( - 
fn_tree="paddle.gcd", + fn_tree="paddle.tensor.math.lgamma", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - min_value=-100, - max_value=100, - min_num_dims=1, - min_dim_size=1, - num_arrays=2, - shared_dtype=True, + available_dtypes=helpers.get_dtypes("float"), + safety_factor_scale="log", ), ) -def test_paddle_gcd( +def test_paddle_lgamma( *, dtype_and_x, on_device, @@ -1196,50 +1211,51 @@ def test_paddle_gcd( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - fn_tree=fn_tree, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, + atol=1e-4, x=x[0], - y=x[1], ) +# log @handle_frontend_test( - fn_tree="paddle.fmin", - dtypes_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True + fn_tree="paddle.log", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_fmin( +def test_paddle_log( *, - dtypes_and_x, + dtype_and_x, on_device, fn_tree, frontend, - backend_fw, test_flags, + backend_fw, ): - input_dtype, x = dtypes_and_x + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, + frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], - y=x[1], ) -# logit +# log1p @handle_frontend_test( - fn_tree="paddle.logit", + fn_tree="paddle.log1p", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), + max_value=1e5, ), ) -def test_paddle_logit( +def test_paddle_log1p( *, dtype_and_x, on_device, @@ -1257,25 +1273,24 @@ def test_paddle_logit( fn_tree=fn_tree, on_device=on_device, x=x[0], - eps=1e-2, ) -# isnan +# log2 @handle_frontend_test( - fn_tree="paddle.isnan", + fn_tree="paddle.log2", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_isnan( +def test_paddle_log2( *, dtype_and_x, + on_device, + fn_tree, frontend, backend_fw, test_flags, - fn_tree, - on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -1289,21 +1304,21 @@ def test_paddle_isnan( ) -# isfinite +# logit @handle_frontend_test( - fn_tree="paddle.isfinite", + fn_tree="paddle.logit", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_isfinite( +def test_paddle_logit( *, dtype_and_x, + on_device, + fn_tree, frontend, backend_fw, test_flags, - fn_tree, - on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -1314,26 +1329,31 @@ def test_paddle_isfinite( fn_tree=fn_tree, on_device=on_device, x=x[0], + eps=1e-2, ) -# isinf +# max @handle_frontend_test( - fn_tree="paddle.isinf", - dtype_and_x=helpers.dtype_and_values( + fn_tree="paddle.tensor.math.max", + dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), + min_axis=-1, + max_axis=0, + min_num_dims=1, + force_int_axis=False, ), ) -def test_paddle_isinf( +def test_paddle_max( *, dtype_and_x, + on_device, + fn_tree, frontend, backend_fw, test_flags, - fn_tree, - on_device, ): - input_dtype, x = dtype_and_x + input_dtype, x, axis = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, @@ -1342,16 +1362,21 @@ def test_paddle_isinf( fn_tree=fn_tree, on_device=on_device, x=x[0], + axis=axis, + keepdim=False, ) +# maximum @handle_frontend_test( - 
fn_tree="paddle.tensor.math.angle", + fn_tree="paddle.maximum", dtype_and_x=helpers.dtype_and_values( - available_dtypes=["float64", "complex64", "complex128"], + available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + shared_dtype=True, ), ) -def test_paddle_angle( +def test_paddle_maximum( *, dtype_and_x, on_device, @@ -1369,25 +1394,31 @@ def test_paddle_angle( fn_tree=fn_tree, on_device=on_device, x=x[0], + y=x[1], ) +# min @handle_frontend_test( - fn_tree="paddle.fmax", - dtypes_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True + fn_tree="paddle.tensor.math.min", + dtype_and_x=helpers.dtype_values_axis( + available_dtypes=helpers.get_dtypes("valid"), + min_axis=-1, + max_axis=0, + min_num_dims=1, + force_int_axis=False, ), ) -def test_paddle_fmax( +def test_paddle_min( *, - dtypes_and_x, + dtype_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): - input_dtype, x = dtypes_and_x + input_dtype, x, axis = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, @@ -1396,7 +1427,8 @@ def test_paddle_fmax( fn_tree=fn_tree, on_device=on_device, x=x[0], - y=x[1], + axis=axis, + keepdim=False, ) @@ -1430,76 +1462,76 @@ def test_paddle_minimum( ) -# erf +# mm @handle_frontend_test( - fn_tree="paddle.tensor.math.erf", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), + fn_tree="paddle.tensor.math.mm", + dtype_xy=_get_dtype_input_and_matrices(), ) -def test_paddle_erf( +def test_paddle_mm( *, - dtype_and_x, + dtype_xy, + on_device, + fn_tree, frontend, - backend_fw, test_flags, - fn_tree, - on_device, + backend_fw, ): - input_dtype, x = dtype_and_x + input_dtype, x, y = dtype_xy helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, + frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - x=x[0], + input=x, + mat2=y, ) -# trunc +# multiply @handle_frontend_test( - fn_tree="paddle.trunc", + fn_tree="paddle.multiply", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float", "int"), + available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + allow_inf=False, + large_abs_safety_factor=2, + small_abs_safety_factor=2, + safety_factor_scale="log", + shared_dtype=True, ), ) -def test_paddle_trunc( +def test_paddle_multiply( *, dtype_and_x, on_device, fn_tree, frontend, - backend_fw, test_flags, + backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, + frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], + y=x[1], ) +# neg @handle_frontend_test( - fn_tree="paddle.tensor.math.sgn", + fn_tree="paddle.neg", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - min_num_dims=1, - max_num_dims=1, - min_dim_size=1, - max_dim_size=1, - abs_smallest_val=1e-10, - min_value=-10, - max_value=10, + available_dtypes=["float32", "float64", "int8", "int16", "int32", "int64"], ), ) -def test_paddle_sgn( +def test_paddle_neg( *, dtype_and_x, on_device, @@ -1520,16 +1552,18 @@ def test_paddle_sgn( ) -# maximum +# outer @handle_frontend_test( - fn_tree="paddle.maximum", + fn_tree="paddle.tensor.math.outer", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, + min_num_dims=1, + 
max_num_dims=1, shared_dtype=True, ), ) -def test_paddle_maximum( +def test_paddle_outer( *, dtype_and_x, on_device, @@ -1543,107 +1577,91 @@ def test_paddle_maximum( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - test_flags=test_flags, fn_tree=fn_tree, + test_flags=test_flags, on_device=on_device, x=x[0], y=x[1], ) -# frac +# pow @handle_frontend_test( - fn_tree="paddle.tensor.math.frac", + fn_tree="paddle.pow", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), - num_arrays=1, - max_value=1e6, - min_value=-1e6, + num_arrays=2, + allow_inf=False, + shared_dtype=True, ), ) -def test_paddle_frac( +def test_paddle_pow( *, dtype_and_x, + on_device, + fn_tree, frontend, - backend_fw, test_flags, - fn_tree, - on_device, + backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, + frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], + y=x[1], ) -@st.composite -def diagonal_draw(draw): - dtype, x = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("numeric"), - min_num_dims=2, - max_num_dims=5, - min_dim_size=2, - max_dim_size=50, - ) - ) - axes = draw( - st.lists( - helpers.ints(min_value=-(len(x)), max_value=len(x)), - min_size=2, - max_size=len(x) + 1, - unique=True, - ).filter(lambda axes: axes[0] % 2 != axes[1] % 2) - ) - - return dtype, x, axes - - -# diagonal +# prod @handle_frontend_test( - fn_tree="paddle.tensor.math.diagonal", - data=diagonal_draw(), - offset=helpers.ints(min_value=-10, max_value=50), + fn_tree="paddle.tensor.math.prod", + dtype_and_x=helpers.dtype_values_axis( + available_dtypes=helpers.get_dtypes("numeric"), + min_axis=-1, + max_axis=0, + min_num_dims=1, + min_value=-10, + max_value=10, + force_int_axis=False, + allow_nan=False, + ), ) -def test_paddle_diagonal( +def test_paddle_prod( *, - data, - offset, + dtype_and_x, on_device, + backend_fw, fn_tree, frontend, - backend_fw, test_flags, ): - dtype, x, axes = data + input_dtype, x, axis = dtype_and_x helpers.test_frontend_function( - backend_to_test=backend_fw, - input_dtypes=dtype, + input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], - offset=offset, - axis1=axes[0], - axis2=axes[1], + axis=axis, + keepdim=False, + backend_to_test=backend_fw, ) -# asinh +# rad2deg @handle_frontend_test( - fn_tree="paddle.tensor.math.asinh", + fn_tree="paddle.rad2deg", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_asinh( +def test_paddle_rad2deg( *, dtype_and_x, on_device, @@ -1660,23 +1678,18 @@ def test_paddle_asinh( test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - atol=1e-2, x=x[0], ) -# max +# reciprocal @handle_frontend_test( - fn_tree="paddle.tensor.math.max", - dtype_and_x=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("valid"), - min_axis=-1, - max_axis=0, - min_num_dims=1, - force_int_axis=False, + fn_tree="paddle.reciprocal", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_max( +def test_paddle_reciprocal( *, dtype_and_x, on_device, @@ -1685,7 +1698,7 @@ def test_paddle_max( backend_fw, test_flags, ): - input_dtype, x, axis = dtype_and_x + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, @@ -1694,17 +1707,15 @@ def test_paddle_max( 
fn_tree=fn_tree, on_device=on_device, x=x[0], - axis=axis, - keepdim=False, ) -# lerp +# remainder @handle_frontend_test( - fn_tree="paddle.tensor.math.lerp", + fn_tree="paddle.remainder", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), - num_arrays=3, + num_arrays=2, allow_inf=False, large_abs_safety_factor=2, small_abs_safety_factor=2, @@ -1712,7 +1723,7 @@ def test_paddle_max( shared_dtype=True, ), ) -def test_paddle_lerp( +def test_paddle_remainder( *, dtype_and_x, on_device, @@ -1726,80 +1737,69 @@ def test_paddle_lerp( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - fn_tree=fn_tree, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], - weight=x[2], ) -# outer +# round @handle_frontend_test( - fn_tree="paddle.tensor.math.outer", + fn_tree="paddle.tensor.math.round", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - num_arrays=2, - min_num_dims=1, - max_num_dims=1, - shared_dtype=True, + available_dtypes=helpers.get_dtypes("float"), + min_value=1, ), ) -def test_paddle_outer( +def test_paddle_round( *, dtype_and_x, - on_device, - fn_tree, frontend, - backend_fw, test_flags, + fn_tree, + backend_fw, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, - fn_tree=fn_tree, + frontend=frontend, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, x=x[0], - y=x[1], ) -# heaviside +# round_ @handle_frontend_test( - fn_tree="paddle.tensor.math.heaviside", + fn_tree="paddle.tensor.math.round_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - allow_inf=False, - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - shared_dtype=True, + min_value=1, ), ) -def test_paddle_heaviside( +def test_paddle_round_( *, dtype_and_x, - on_device, - fn_tree, frontend, - backend_fw, test_flags, + fn_tree, + backend_fw, + on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, - fn_tree=fn_tree, + frontend=frontend, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, x=x[0], - y=x[1], ) @@ -1859,143 +1859,113 @@ def test_paddle_rsqrt_( ) -# prod @handle_frontend_test( - fn_tree="paddle.tensor.math.prod", - dtype_and_x=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("numeric"), - min_axis=-1, - max_axis=0, + fn_tree="paddle.tensor.math.sgn", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float_and_complex"), min_num_dims=1, + max_num_dims=1, + min_dim_size=1, + max_dim_size=1, + abs_smallest_val=1e-10, min_value=-10, max_value=10, - force_int_axis=False, - allow_nan=False, ), ) -def test_paddle_prod( +def test_paddle_sgn( *, dtype_and_x, on_device, - backend_fw, fn_tree, frontend, + backend_fw, test_flags, ): - input_dtype, x, axis = dtype_and_x + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, + backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], - axis=axis, - keepdim=False, - backend_to_test=backend_fw, ) -# any +# sign @handle_frontend_test( - fn_tree="paddle.tensor.math.any", - dtype_and_x=helpers.dtype_values_axis( - available_dtypes=["bool"], - valid_axis=True, - allow_neg_axes=True, - force_int_axis=True, - min_num_dims=1, + 
fn_tree="paddle.tensor.math.sign", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_any( +def test_paddle_sign( *, dtype_and_x, on_device, fn_tree, frontend, - test_flags, backend_fw, + test_flags, ): - input_dtype, x, axis = dtype_and_x + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, + backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - backend_to_test=backend_fw, x=x[0], - axis=axis, - keepdim=False, ) -# diff +# sin @handle_frontend_test( - fn_tree="paddle.tensor.math.diff", - dtype_n_x_n_axis=helpers.dtype_values_axis( - available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"), - min_num_dims=1, - valid_axis=True, - force_int_axis=True, - ), - n=st.integers(min_value=1, max_value=1), - dtype_prepend=helpers.dtype_and_values( - available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"), - min_num_dims=1, - max_num_dims=1, - ), - dtype_append=helpers.dtype_and_values( - available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"), - min_num_dims=1, - max_num_dims=1, + fn_tree="paddle.sin", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_diff( +def test_paddle_sin( *, - dtype_n_x_n_axis, - n, - dtype_prepend, - dtype_append, - test_flags, + dtype_and_x, + on_device, + fn_tree, frontend, + test_flags, backend_fw, - fn_tree, - on_device, ): - input_dtype, x, axis = dtype_n_x_n_axis - _, prepend = dtype_prepend - _, append = dtype_append + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - test_flags=test_flags, - frontend=frontend, backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], - n=n, - axis=axis, - prepend=prepend[0], - append=append[0], ) -# mm +# sinh @handle_frontend_test( - fn_tree="paddle.tensor.math.mm", - dtype_xy=_get_dtype_input_and_matrices(), + fn_tree="paddle.sinh", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + ), ) -def test_paddle_mm( +def test_paddle_sinh( *, - dtype_xy, + dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): - input_dtype, x, y = dtype_xy + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, @@ -2003,81 +1973,125 @@ def test_paddle_mm( test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - input=x, - mat2=y, + x=x[0], ) -# addmm +# sqrt @handle_frontend_test( - fn_tree="paddle.tensor.math.addmm", - dtype_input_xy=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True), - beta=st.floats( - min_value=-5, - max_value=5, - allow_nan=False, - allow_subnormal=False, - allow_infinity=False, + fn_tree="paddle.tensor.math.sqrt", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), ), - alpha=st.floats( - min_value=-5, - max_value=5, - allow_nan=False, - allow_subnormal=False, - allow_infinity=False, +) +def test_paddle_sqrt( + *, + dtype_and_x, + frontend, + test_flags, + fn_tree, + backend_fw, + on_device, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + ) + + +# square +@handle_frontend_test( + fn_tree="paddle.tensor.math.square", + 
dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_addmm( +def test_paddle_square( *, - dtype_input_xy, - beta, - alpha, + dtype_and_x, on_device, fn_tree, frontend, - test_flags, backend_fw, + test_flags, ): - input_dtype, input, x, y = dtype_input_xy + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, + frontend=frontend, backend_to_test=backend_fw, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + ) + + +# stanh +@handle_frontend_test( + fn_tree="paddle.tensor.math.stanh", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + ), + scale_a=st.floats(1e-5, 1e5), + scale_b=st.floats(1e-5, 1e5), +) +def test_paddle_stanh( + *, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, + scale_a, + scale_b, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - input=input[0], x=x[0], - y=y[0], - beta=beta, - alpha=alpha, + scale_a=scale_a, + scale_b=scale_b, ) -# kron +# subtract @handle_frontend_test( - fn_tree="paddle.tensor.math.kron", + fn_tree="paddle.subtract", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, allow_inf=False, + large_abs_safety_factor=2, + small_abs_safety_factor=2, + safety_factor_scale="log", shared_dtype=True, ), ) -def test_paddle_kron( +def test_paddle_subtract( *, dtype_and_x, on_device, fn_tree, - backend_fw, frontend, test_flags, + backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, - frontend=frontend, backend_to_test=backend_fw, + frontend=frontend, fn_tree=fn_tree, test_flags=test_flags, on_device=on_device, @@ -2086,23 +2100,6 @@ def test_paddle_kron( ) -@st.composite -def _test_paddle_take_helper(draw): - mode = draw(st.sampled_from(["raise", "clip", "wrap"])) - - safe_bounds = mode == "raise" - - dtypes, xs, indices, _, _ = draw( - helpers.array_indices_axis( - array_dtypes=helpers.get_dtypes("float_and_integer"), - indices_dtypes=["int32", "int64"], - valid_bounds=safe_bounds, - ) - ) - - return dtypes, xs, indices, mode - - # take @handle_frontend_test( fn_tree="paddle.take", dtype_and_values=_test_paddle_take_helper() @@ -2130,23 +2127,20 @@ def test_paddle_take( ) -# amax +# tan @handle_frontend_test( - fn_tree="paddle.tensor.math.amax", + fn_tree="paddle.tensor.math.tan", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - num_arrays=2, - allow_inf=False, - shared_dtype=True, + available_dtypes=helpers.get_dtypes("float"), ), ) -def test_paddle_amax( +def test_paddle_tan( *, dtype_and_x, on_device, fn_tree, - backend_fw, frontend, + backend_fw, test_flags, ): input_dtype, x = dtype_and_x @@ -2154,73 +2148,67 @@ def test_paddle_amax( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - fn_tree=fn_tree, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, + atol=1e-2, x=x[0], ) -# stanh +# tanh @handle_frontend_test( - fn_tree="paddle.tensor.math.stanh", + fn_tree="paddle.tensor.math.tanh", + aliases=["paddle.tanh", "paddle.nn.functional.tanh"], dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), ), - scale_a=st.floats(1e-5, 1e5), - scale_b=st.floats(1e-5, 1e5), ) -def test_paddle_stanh( +def test_paddle_tanh( *, 
dtype_and_x, on_device, fn_tree, frontend, test_flags, - scale_a, - scale_b, + backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, + backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, + atol=1e-2, x=x[0], - scale_a=scale_a, - scale_b=scale_b, ) -# amin +# trunc @handle_frontend_test( - fn_tree="paddle.tensor.math.amin", - dtype_and_x=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("valid"), - valid_axis=True, + fn_tree="paddle.trunc", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float", "int"), ), - keepdim=st.booleans(), ) -def test_paddle_amin( +def test_paddle_trunc( *, dtype_and_x, - keepdim, on_device, fn_tree, - backend_fw, frontend, + backend_fw, test_flags, ): - input_dtype, x, axis = dtype_and_x + input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, - fn_tree=fn_tree, test_flags=test_flags, + fn_tree=fn_tree, on_device=on_device, x=x[0], - axis = axis, - keepdim = keepdim, ) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_random.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_random.py index 8880032bf12b2..71c1f9f9fc227 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_random.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_random.py @@ -2,38 +2,235 @@ from hypothesis import strategies as st # local + import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test @handle_frontend_test( - fn_tree="paddle.tensor.random.exponential_", + fn_tree="paddle.poisson", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=1000, min_num_dims=1, - max_num_dims=10, + max_num_dims=1, min_dim_size=2, - max_dim_size=10, + max_dim_size=2, ), ) -def test_paddle_exponential_( - fn_tree, - dtype_and_x, +def test_paddle_poisson(dtype_and_x, backend_fw, frontend, test_flags, fn_tree): + dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=dtype, + frontend=frontend, + backend_to_test=backend_fw, + test_flags=test_flags, + fn_tree=fn_tree, + test_values=False, + x=x[0], + ) + + +@handle_frontend_test( + fn_tree="paddle.rand", + input_dtypes=st.sampled_from(["int32", "int64"]), + shape=helpers.get_shape( + allow_none=False, + min_num_dims=0, + min_dim_size=1, + ), + dtype=helpers.get_dtypes("valid", full=False), +) +def test_paddle_rand( + *, + input_dtypes, + shape, + dtype, frontend, backend_fw, test_flags, + fn_tree, +): + helpers.test_frontend_function( + input_dtypes=[input_dtypes], + frontend=frontend, + backend_to_test=backend_fw, + test_flags=test_flags, + fn_tree=fn_tree, + test_values=False, + shape=shape, + dtype=dtype[0], + ) + + +# randint +@handle_frontend_test( + fn_tree="paddle.randint", + low=helpers.ints(min_value=0, max_value=10), + high=helpers.ints(min_value=11, max_value=20), + dtype=helpers.get_dtypes("integer"), + shape=helpers.get_shape( + allow_none=False, min_num_dims=2, max_num_dims=7, min_dim_size=2 + ), +) +def test_paddle_randint( + low, + high, + dtype, + backend_fw, + frontend, + test_flags, + shape, + fn_tree, ): - dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_values=False, + fn_tree=fn_tree, + test_flags=test_flags, + low=low, + high=high, + 
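# paddle.randint samples from [low, high), so low is inclusive and high exclusive +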
shape=shape, + ) + + +@handle_frontend_test( + fn_tree="paddle.randint_like", + input_dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + shape=helpers.get_shape( + allow_none=False, min_num_dims=2, max_num_dims=7, min_dim_size=2 + ), + ), + low=st.integers(min_value=0, max_value=10), + high=st.integers(min_value=11, max_value=20), + dtype=helpers.get_dtypes("integer"), +) +def test_paddle_randint_like( + input_dtype_and_x, + low, + high, + dtype, + frontend, + backend_fw, + test_flags, + fn_tree, + on_device, +): + input_dtype, x = input_dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, + on_device=on_device, test_values=False, x=x[0], + low=low, + high=high, + dtype=dtype[0], + ) + + +@handle_frontend_test( + fn_tree="paddle.randn", + input_dtypes=st.sampled_from(["int32", "int64"]), + shape=helpers.get_shape( + allow_none=False, min_num_dims=1, max_num_dims=1, min_dim_size=2 + ), + dtype=st.sampled_from(["float32", "float64"]), +) +def test_paddle_randn( + *, + input_dtypes, + shape, + dtype, + frontend, + backend_fw, + test_flags, + fn_tree, +): + helpers.test_frontend_function( + input_dtypes=[input_dtypes], + frontend=frontend, + backend_to_test=backend_fw, + test_flags=test_flags, + fn_tree=fn_tree, + test_values=False, + shape=shape, + dtype=dtype, + ) + + +@handle_frontend_test( + fn_tree="paddle.standard_normal", + input_dtypes=st.sampled_from([["int32"], ["int64"]]), + shape=helpers.get_shape( + min_num_dims=1, + min_dim_size=1, + ), + dtype=helpers.get_dtypes("valid", full=False), +) +def test_paddle_standard_normal( + input_dtypes, + shape, + dtype, + frontend, + backend_fw, + test_flags, + fn_tree, +): + helpers.test_frontend_function( + input_dtypes=input_dtypes, + frontend=frontend, + backend_to_test=backend_fw, + test_flags=test_flags, + fn_tree=fn_tree, + test_values=False, + shape=shape, + dtype=dtype[0], + ) + + +@handle_frontend_test( + fn_tree="paddle.uniform", + input_dtypes=helpers.get_dtypes("float"), + shape=st.tuples( + st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5) + ), + dtype=helpers.get_dtypes("valid", full=False), + min=st.floats(allow_nan=False, allow_infinity=False, width=32), + max=st.floats(allow_nan=False, allow_infinity=False, width=32), + seed=st.integers(min_value=2, max_value=5), +) +def test_paddle_uniform( + input_dtypes, + shape, + dtype, + min, + max, + seed, + frontend, + backend_fw, + test_flags, + fn_tree, +): + helpers.test_frontend_function( + input_dtypes=input_dtypes, + frontend=frontend, + backend_to_test=backend_fw, + test_flags=test_flags, + fn_tree=fn_tree, + test_values=False, + shape=shape, + dtype=dtype[0], + min=min, + max=max, + seed=seed, ) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py index d53cee3018a24..2e9a57c783373 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py @@ -35,60 +35,6 @@ def _filter_query(query): ) -# clip -@st.composite -def _get_clip_inputs(draw): - shape = draw( - helpers.get_shape( - min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10 - ) - ) - x_dtype, x = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - shape=shape, - min_value=0, - max_value=50, - ) - ) - min 
= draw( - helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=0, max_value=25) - ) - max = draw( - helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=26, max_value=50) - ) - if draw(st.booleans()): - min = None - elif draw(st.booleans()): - max = None - return x_dtype, x, min, max - - -# clip_ -@st.composite -def _get_clip_inputs_(draw): - shape = draw( - helpers.get_shape( - min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10 - ) - ) - x_dtype, x = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - shape=shape, - min_value=0, - max_value=50, - ) - ) - min = draw( - helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=0, max_value=25) - ) - max = draw( - helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=26, max_value=50) - ) - return x_dtype, x, min, max - - # cond @st.composite def _get_dtype_and_matrix_non_singular(draw, dtypes): @@ -331,39 +277,6 @@ def test_paddle_instance_var( ) -# is_floating_point -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="is_floating_point", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=["int16", "int32", "int64", "float32", "float64"], - ), -) -def test_paddle_is_floating_point( - dtype_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - backend_fw, - on_device, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - backend_to_test=backend_fw, - method_all_as_kwargs_np={}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # abs @handle_frontend_method( class_tree=CLASS_TREE, @@ -442,7 +355,7 @@ def test_paddle_tensor_acosh( init_tree="paddle.to_tensor", method_name="add_", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True + available_dtypes=helpers.get_dtypes("float"), ), ) def test_paddle_tensor_add_( @@ -462,7 +375,7 @@ def test_paddle_tensor_add_( "data": x[0], }, method_input_dtypes=input_dtype, - method_all_as_kwargs_np={"y": x[1]}, + method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, @@ -685,50 +598,6 @@ def test_paddle_tensor_argmax( ) -# argmin -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="argmin", - dtype_x_axis=helpers.dtype_values_axis( - available_dtypes=st.one_of(helpers.get_dtypes("valid")), - min_axis=-1, - max_axis=0, - min_num_dims=1, - force_int_axis=True, - ), - keep_dims=st.booleans(), -) -def test_paddle_tensor_argmin( - dtype_x_axis, - keep_dims, - on_device, - backend_fw, - frontend_method_data, - init_flags, - method_flags, - frontend, -): - input_dtypes, x, axis = dtype_x_axis - helpers.test_frontend_method( - init_input_dtypes=input_dtypes, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "object": x[0], - }, - method_input_dtypes=input_dtypes, - method_all_as_kwargs_np={ - "axis": axis, - "keepdim": keep_dims, - }, - frontend=frontend, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - on_device=on_device, - ) - - # argsort @handle_frontend_method( class_tree=CLASS_TREE, @@ -1052,41 +921,6 @@ def test_paddle_tensor_ceil( ) -# ceil_ -@handle_frontend_method( - class_tree=CLASS_TREE, - 
init_tree="paddle.to_tensor", - method_name="ceil_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), -) -def test_paddle_tensor_ceil_( - dtype_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # cholesky @handle_frontend_method( class_tree=CLASS_TREE, @@ -1124,78 +958,6 @@ def test_paddle_tensor_cholesky( ) -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="clip", - input_and_ranges=_get_clip_inputs(), -) -def test_paddle_tensor_clip( - input_and_ranges, - frontend, - frontend_method_data, - backend_fw, - init_flags, - method_flags, - on_device, -): - input_dtype, x, min, max = input_and_ranges - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={"min": min, "max": max}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - backend_to_test=backend_fw, - on_device=on_device, - ) - - -# clip_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="clip_", - input_and_ranges=_get_clip_inputs_(), -) -def test_paddle_tensor_clip_( - input_and_ranges, - frontend, - frontend_method_data, - backend_fw, - init_flags, - method_flags, - on_device, -): - input_dtype, x, min_val, max_val = input_and_ranges - if min_val > max_val: - max_value = min_val - min_value = max_val - else: - max_value = max_val - min_value = min_val - - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={"min": min_value, "max": max_value}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - backend_to_test=backend_fw, - on_device=on_device, - ) - - @handle_frontend_method( class_tree=CLASS_TREE, init_tree="paddle.to_tensor", @@ -2154,42 +1916,6 @@ def test_paddle_tensor_isnan( ) -# lerp_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="lerp_", - dtypes_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), num_arrays=3, shared_dtype=True - ), -) -def test_paddle_tensor_lerp_( - dtypes_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtypes_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "y": x[1], - "weight": x[2], - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # less_than @handle_frontend_method( class_tree=CLASS_TREE, @@ -2629,42 +2355,6 @@ def test_paddle_tensor_numel( ) -# pow -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="pow", 
- dtypes_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - num_arrays=2, - allow_inf=False, - shared_dtype=True, - ), -) -def test_paddle_tensor_pow( - dtypes_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtypes_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={"y": x[1]}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # rad2deg @handle_frontend_method( class_tree=CLASS_TREE, @@ -2735,45 +2425,6 @@ def test_paddle_tensor_reciprocal( ) -# remainder -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="remainder", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - allow_inf=False, - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - shared_dtype=True, - ), -) -def test_paddle_tensor_remainder( - dtype_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={"y": x[1]}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # remainder_ @handle_frontend_method( class_tree=CLASS_TREE, @@ -2891,41 +2542,6 @@ def test_paddle_tensor_rsqrt( ) -# rsqrt_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="rsqrt_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), -) -def test_paddle_tensor_rsqrt_( - dtype_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), @@ -3123,39 +2739,6 @@ def test_paddle_tensor_sqrt( ) -# sqrt_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="sqrt_", - dtype_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), -) -def test_paddle_tensor_sqrt_( - dtype_x, - frontend, - frontend_method_data, - init_flags, - method_flags, - on_device, - backend_fw, -): - input_dtype, x = dtype_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # square @handle_frontend_method( class_tree=CLASS_TREE, @@ -3191,92 +2774,6 @@ def 
test_paddle_tensor_square( ) -# squeeze_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="squeeze_", - dtype_value=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - shape=st.shared(helpers.get_shape(), key="shape"), - ), - axis=helpers.get_axis( - shape=st.shared(helpers.get_shape(), key="shape"), - allow_neg=True, - force_int=True, - ), -) -def test_paddle_tensor_squeeze_( - dtype_value, - axis, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_value - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "axis": axis, - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - -# stanh -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="stanh", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), - scale_a=st.floats(1e-5, 1e5), - scale_b=st.floats(1e-5, 1e5), -) -def test_paddle_tensor_stanh( - dtype_and_x, - frontend_method_data, - scale_a, - scale_b, - init_flags, - method_flags, - frontend, - backend_fw, - on_device, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - backend_to_test=backend_fw, - method_all_as_kwargs_np={ - "scale_a": scale_a, - "scale_b": scale_b, - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # subtract @handle_frontend_method( class_tree=CLASS_TREE, @@ -3456,76 +2953,29 @@ def test_paddle_tensor_unsqueeze( ) -# unsqueeze_ +# sqrt_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="paddle.to_tensor", - method_name="unsqueeze_", - dtype_value=helpers.dtype_and_values( + method_name="sqrt_", + dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), - shape=st.shared(helpers.get_shape(), key="shape"), - ), - axis=helpers.get_axis( - shape=st.shared(helpers.get_shape(), key="shape"), - allow_neg=True, - force_int=True, ), ) -def test_paddle_tensor_unsqueeze_( - dtype_value, - axis, - frontend_method_data, - init_flags, - method_flags, +def test_paddle_tensor_sqrt_( + dtype_x, frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_value - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "axis": axis, - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - -# zero_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="paddle.to_tensor", - method_name="zero_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - allow_inf=False, - ), -) -def test_paddle_tensor_zero_( - dtype_and_x, frontend_method_data, init_flags, method_flags, - frontend, on_device, backend_fw, ): - input_dtype, x = dtype_and_x + input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, -
init_all_as_kwargs_np={ - "data": x[0], - }, + init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, diff --git a/ivy_tests/test_ivy/test_frontends/test_scipy/test_fft/test_fft.py b/ivy_tests/test_ivy/test_frontends/test_scipy/test_fft/test_fft.py index e3bc4da379eb3..6e37c555f41df 100644 --- a/ivy_tests/test_ivy/test_frontends/test_scipy/test_fft/test_fft.py +++ b/ivy_tests/test_ivy/test_frontends/test_scipy/test_fft/test_fft.py @@ -138,40 +138,6 @@ def _x_and_ifftn(draw): return _x_and_ifftn + (workers,) -@st.composite -def _x_and_rfftn(draw): - min_rfftn_points = 2 - dtype = draw(helpers.get_dtypes("float")) - x_dim = draw( - helpers.get_shape( - min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=3 - ) - ) - x = draw( - helpers.array_values( - dtype=dtype[0], - shape=tuple(x_dim), - min_value=-1e10, - max_value=1e10, - large_abs_safety_factor=2.5, - small_abs_safety_factor=2.5, - safety_factor_scale="log", - ) - ) - axes = draw( - st.lists( - st.integers(0, len(x_dim) - 1), min_size=1, max_size=len(x_dim), unique=True - ) - ) - s = draw( - st.lists( - st.integers(min_rfftn_points, 256), min_size=len(axes), max_size=len(axes) - ) - ) - norm = draw(st.sampled_from(["backward", "forward", "ortho"])) - return dtype, x, s, axes, norm - - # --- Main --- # # ------------ # @@ -188,13 +154,11 @@ def test_scipy_dct( test_flags, fn_tree, on_device, - backend_fw, ): input_dtype, x, _type, n, axis, norm = dtype_x_and_args helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, - backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, @@ -223,13 +187,11 @@ def test_scipy_fft( test_flags, fn_tree, on_device, - backend_fw, ): dtype, x, dim, norm, n = d_x_d_n_n helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, - backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, @@ -252,13 +214,11 @@ def test_scipy_fft2( test_flags, fn_tree, on_device, - backend_fw, ): dtype, x, s, ax, norm = d_x_d_s_n helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, - backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, @@ -281,13 +241,11 @@ def test_scipy_idct( test_flags, fn_tree, on_device, - backend_fw, ): input_dtype, x, _type, n, axis, norm = dtype_x_and_args helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, - backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, @@ -313,18 +271,16 @@ def test_scipy_ifft( test_flags, fn_tree, on_device, - backend_fw, ): dtype, x, dim, norm, n = d_x_d_n_n helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, - backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x, - axis=dim, + dim=dim, norm=norm, n=n, ) @@ -342,14 +298,12 @@ def test_scipy_ifftn( test_flags, fn_tree, on_device, - backend_fw, ): dtype, x, s, ax, norm, workers = d_x_d_s_n_workers helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, - backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], @@ -363,20 +317,26 @@ def test_scipy_ifftn( # rfftn @handle_frontend_test( fn_tree="scipy.fft.rfftn", - dtype_and_x=_x_and_rfftn(), + d_x_d_s_n_workers=_x_and_ifftn(), + test_with_out=st.just(False), ) -def test_scipy_rfftn(dtype_and_x, frontend, backend_fw, test_flags, fn_tree, on_device): - dtype, x, s, 
axes, norm = dtype_and_x
+def test_scipy_rfftn(
+    d_x_d_s_n_workers,
+    frontend,
+    test_flags,
+    fn_tree,
+    on_device,
+):
+    dtype, x, s, ax, norm, workers = d_x_d_s_n_workers
     helpers.test_frontend_function(
         input_dtypes=dtype,
-        backend_to_test=backend_fw,
         frontend=frontend,
         test_flags=test_flags,
         fn_tree=fn_tree,
         on_device=on_device,
-        test_values=True,
-        x=x,
+        x=x[0],
         s=s,
-        axes=axes,
+        axes=ax,
         norm=norm,
+        workers=workers,
     )
diff --git a/ivy_tests/test_ivy/test_frontends/test_scipy/test_linalg/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_scipy/test_linalg/test_linalg.py
index bb2f61a1c45b0..3f7d69283fa4f 100644
--- a/ivy_tests/test_ivy/test_frontends/test_scipy/test_linalg/test_linalg.py
+++ b/ivy_tests/test_ivy/test_frontends/test_scipy/test_linalg/test_linalg.py
@@ -1,12 +1,13 @@
 # TODO: uncomment after frontend is not required
 # global
+import ivy
 import sys
 from hypothesis import strategies as st
 import numpy as np
 
 # local
 import ivy_tests.test_ivy.helpers as helpers
-from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
+from ivy_tests.test_ivy.helpers import handle_frontend_test
 
 
 # --- Helpers --- #
@@ -325,14 +326,13 @@ def test_scipy_svd(
     frontend,
     test_flags,
     fn_tree,
-    backend_fw,
     on_device,
+    backend_fw,
 ):
     dtype, x = dtype_and_x
-    x = x[0]
-    x = (
-        np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
-    )  # make symmetric positive-definite
+    x = np.asarray(x[0], dtype=dtype[0])
+    # make symmetric positive-definite beforehand
+    x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
     ret, ret_gt = helpers.test_frontend_function(
         input_dtypes=dtype,
         backend_to_test=backend_fw,
@@ -341,22 +341,14 @@ def test_scipy_svd(
         test_values=False,
         fn_tree=fn_tree,
         on_device=on_device,
         a=x,
         full_matrices=full_matrices,
         compute_uv=compute_uv,
     )
-    with BackendHandler.update_backend(backend_fw) as ivy_backend:
-        for u, v in zip(ret, ret_gt):
-            u = ivy_backend.to_numpy(ivy_backend.abs(u))
-            v = ivy_backend.to_numpy(ivy_backend.abs(v))
-            helpers.value_test(
-                ret_np_flat=u,
-                ret_np_from_gt_flat=v,
-                rtol=1e-04,
-                atol=1e-04,
-                backend=backend_fw,
-                ground_truth_backend=frontend,
-            )
+    for u, v in zip(ret, ret_gt):
+        u = ivy.to_numpy(ivy.abs(u))
+        v = ivy.to_numpy(ivy.abs(v))
+        helpers.value_test(ret_np_flat=u, ret_np_from_gt_flat=v, rtol=1e-04, atol=1e-04)
 
 
 # svdvals
diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py
index b703153a9896f..7bf8e2eb7f56a 100644
--- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py
+++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py
@@ -911,60 +911,6 @@ def test_tensorflow_foldl(
     )
 
 
-# foldr
-@handle_frontend_test(
-    fn_tree="tensorflow.foldr",
-    fn=st.sampled_from(
-        [
-            lambda a, b: a + b,
-            lambda a, b: a - b,
-            lambda a, b: a * b,
-        ],
-    ),
-    initializer=st.one_of(st.none(), st.floats(min_value=-1e3, max_value=1e3)),
-    dtype_and_values=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float", full=False),
-        min_value=-1e3,
-        max_value=1e3,
-        max_dim_size=10,
-        max_num_dims=4,
-        min_dim_size=1,
-        min_num_dims=1,
-    ),
-    parallel_iterations=st.just(10),
-    swap_memory=st.booleans(),
-    name=st.none(),
-)
-def test_tensorflow_foldr(
-    *,
-    fn,
-    initializer,
-    dtype_and_values,
-    frontend,
-    backend_fw,
-    fn_tree,
-    test_flags,
-    parallel_iterations,
-    swap_memory,
-    name,
-):
-    dtype, elems = dtype_and_values
-    elems =
np.atleast_1d(elems) - helpers.test_frontend_function( - input_dtypes=dtype, - fn=fn, - elems=elems, - initializer=initializer, - backend_to_test=backend_fw, - parallel_iterations=parallel_iterations, - swap_memory=swap_memory, - name=name, - frontend=frontend, - fn_tree=fn_tree, - test_flags=test_flags, - ) - - # gather @handle_frontend_test( fn_tree="tensorflow.gather", @@ -1151,51 +1097,6 @@ def test_tensorflow_linspace( ) -# meshgrid -@handle_frontend_test( - fn_tree="tensorflow.meshgrid", - dtype_and_values=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("integer"), - max_num_dims=2, - min_num_dims=2, - min_dim_size=2, - max_dim_size=5, - ), - indexing=st.sampled_from(["xy", "ij"]), - test_with_out=st.just(False), -) -def test_tensorflow_meshgrid( - *, - dtype_and_values, - indexing, - on_device, - fn_tree, - frontend, - backend_fw, - test_flags, -): - dtype, arrays = dtype_and_values - arrays = arrays[0] - kwargs = {} - - for i, array in enumerate(arrays): - kwargs[f"a{i}"] = array - - kwargs["indexing"] = indexing - - test_flags.num_positional_args = len(arrays) - test_flags.generate_frontend_arrays = False - helpers.test_frontend_function( - input_dtypes=dtype, - frontend=frontend, - backend_to_test=backend_fw, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - **kwargs, - ) - - # no_op @handle_frontend_test( fn_tree="tensorflow.no_op", @@ -1964,28 +1865,6 @@ def test_tensorflow_stack( ) -# stop_gradient -@handle_frontend_test( - fn_tree="tensorflow.stop_gradient", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("numeric") - ), -) -def test_tensorflow_stop_gradient( - *, dtype_and_x, test_flags, backend_fw, fn_tree, frontend, on_device -): - dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=dtype, - test_flags=test_flags, - frontend=frontend, - backend_to_test=backend_fw, - fn_tree=fn_tree, - on_device=on_device, - input=x[0], - ) - - # strided_slice @handle_frontend_test( fn_tree="tensorflow.strided_slice", diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py index c33001f11444f..059054d9d8982 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py @@ -156,6 +156,25 @@ def _get_hermitian_pos_def_matrix(draw): return [input_dtype], hpd +# solve +@st.composite +def _get_second_matrix(draw): + input_dtype_strategy = st.shared( + st.sampled_from(draw(helpers.get_dtypes("float"))), + key="shared_dtype", + ) + input_dtype = draw(input_dtype_strategy) + + shared_size = draw( + st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size") + ) + return input_dtype, draw( + helpers.array_values( + dtype=input_dtype, shape=tuple([shared_size, 1]), min_value=2, max_value=5 + ) + ) + + @st.composite def _get_second_matrix(draw): # batch_shape, shared, random_size diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py index 48935cb1b6021..db0e0a9de988f 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py @@ -761,36 +761,6 @@ def test_tensorflow_cumsum( # NOQA ) -# digamma -@handle_frontend_test( - fn_tree="tensorflow.math.digamma", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=1, - ), - 
test_with_out=st.just(False), -) -def test_tensorflow_digamma( - *, - dtype_and_x, - on_device, - fn_tree, - frontend, - test_flags, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=input_dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - x=x[0], - ) - - # divide @handle_frontend_test( fn_tree="tensorflow.math.divide", diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py index e734dfa5406f7..bf328d73ea81a 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py @@ -157,35 +157,6 @@ def get_list_split(draw): return draw(get_int_split()) -# Tile -@st.composite -def _multiple_shape_helper(draw): - input_dtype, input_array, input_shape = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), ret_shape=True - ) - ) - input_dims = len(input_shape) - - dt_n_multiples = draw( - helpers.dtype_and_values( - available_dtypes=["int32", "int64"], - min_value=0, - max_value=10, - shape=draw( - helpers.get_shape( - min_num_dims=1, - max_num_dims=1, - min_dim_size=input_dims, - max_dim_size=input_dims, - ) - ), - ) - ) - - return input_dtype, input_array, dt_n_multiples - - @st.composite def _pad_helper(draw, return_constant_values=False): dtype, input, shape = draw( @@ -236,7 +207,7 @@ def _pow_helper_shared_dtype(draw): dtype1, dtype2 = dtype x1, x2 = x if "int" in dtype2: - x2 = ivy.nested_map(x2, lambda x: abs(x), include_derived={"list": True}) + x2 = ivy.nested_map(x2, lambda x: abs(x), include_derived={list: True}) if ivy.is_int_dtype(dtype2): max_val = ivy.iinfo(dtype2).max @@ -1817,45 +1788,6 @@ def test_tensorflow_FFT( # NOQA ) -# FFT2D -@handle_frontend_test( - fn_tree="tensorflow.raw_ops.FFT2D", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("complex"), - min_value=-1e5, - max_value=1e5, - min_num_dims=2, - max_num_dims=5, - min_dim_size=2, - max_dim_size=5, - large_abs_safety_factor=2.5, - small_abs_safety_factor=2.5, - safety_factor_scale="log", - ), -) -def test_tensorflow_FFT2D( - *, - dtype_and_x, - frontend, - test_flags, - fn_tree, - backend_fw, - on_device, -): - dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - input=x[0], - rtol=1e-02, - atol=1e-02, - ) - - # fill @handle_frontend_test( fn_tree="tensorflow.raw_ops.Fill", @@ -3751,36 +3683,6 @@ def test_tensorflow_Softplus( # NOQA ) -# Softsign -@handle_frontend_test( - fn_tree="tensorflow.raw_ops.Softsign", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - min_num_dims=1, - ), - test_with_out=st.just(False), -) -def test_tensorflow_Softsign( - *, - dtype_and_x, - frontend, - test_flags, - fn_tree, - backend_fw, - on_device, -): - dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - features=x[0], - ) - - # Split @handle_frontend_test( fn_tree="tensorflow.raw_ops.Split", @@ -4194,32 +4096,6 @@ def test_tensorflow_TanhGrad( # NOQA ) -@handle_frontend_test( - fn_tree="tensorflow.raw_ops.Tile", all_arguments=_multiple_shape_helper() -) -def 
test_tensorflow_Tile( - *, - all_arguments, - test_flags, - frontend, - fn_tree, - on_device, - backend_fw, -): - input_dtype, input_matrix, dt_and_multiples = all_arguments - dt_mul, multiples = dt_and_multiples - helpers.test_frontend_function( - input_dtypes=input_dtype + dt_mul, - input=input_matrix[0], - multiples=multiples[0], - test_flags=test_flags, - backend_to_test=backend_fw, - frontend=frontend, - fn_tree=fn_tree, - on_device=on_device, - ) - - @handle_frontend_test( fn_tree="tensorflow.raw_ops.TruncateDiv", dtype_and_x=helpers.dtype_and_values( diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensor.py index 30655e5736998..b76fcae600ad3 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensor.py @@ -6,6 +6,7 @@ # local import ivy import ivy_tests.test_ivy.helpers as helpers +from ivy.functional.backends.tensorflow.general import _check_query from ivy_tests.test_ivy.helpers import handle_frontend_method, BackendHandler from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_raw_ops import ( _pow_helper_shared_dtype, @@ -63,13 +64,6 @@ def _array_and_shape( return dtype, [array, to_shape] -# same implementation as in tensorflow backend but was causing backend conflict issues -def _check_query(query): - return not isinstance(query, list) and ( - not (ivy.is_array(query) and ivy.is_bool_dtype(query) ^ bool(query.ndim > 0)) - ) - - # --- Main --- # # ------------ # diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py index 51a4c7c6add48..577967c5f3e1d 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py @@ -60,39 +60,6 @@ def _generate_multi_dot_dtype_and_arrays(draw): return input_dtype, [matrix_1[1][0], matrix_2[1][0], matrix_3[1][0]] -@st.composite -def _get_axis_and_p(draw): - p = draw(st.sampled_from(["fro", "nuc", 1, 2, -1, -2, float("inf"), -float("inf")])) - if p == "fro" or p == "nuc": - max_axes_size = 2 - min_axes_size = 2 - else: - min_axes_size = 1 - max_axes_size = 5 - x_dtype, values, axis = draw( - helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("valid"), - min_num_dims=2, - valid_axis=True, - min_value=-1e04, - max_value=1e04, - min_axes_size=min_axes_size, - max_axes_size=max_axes_size, - large_abs_safety_factor=2, - safety_factor_scale="log", - ) - ) - axis = axis[0] if isinstance(axis, tuple) and len(axis) == 1 else axis - # ToDo: fix the castable dtype helper. 
Right now using `dtype` causes errors - # dtype should be real for real inputs, but got ComplexDouble - x_dtype, values, dtype = draw( - helpers.get_castable_dtype( - draw(helpers.get_dtypes("valid")), x_dtype[0], values[0] - ) - ) - return p, x_dtype, values, axis, x_dtype - - # helpers @st.composite def _get_dtype_and_matrix( @@ -901,40 +868,6 @@ def test_torch_multi_dot( ) -@handle_frontend_test( - fn_tree="torch.linalg.norm", - args=_get_axis_and_p(), - keepdim=st.booleans(), - test_with_out=st.just(False), -) -def test_torch_norm( - *, - args, - keepdim, - on_device, - fn_tree, - frontend, - test_flags, - backend_fw, -): - p, x_dtype, x, axis, dtype = args - helpers.test_frontend_function( - input_dtypes=[x_dtype], - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - rtol=1e-01, - atol=1e-08, - input=x, - ord=p, - dim=axis, - keepdim=keepdim, - dtype=dtype, - ) - - # pinv # TODO: add testing for hermitian @handle_frontend_test( diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_miscellaneous_ops.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_miscellaneous_ops.py index 9608623269c24..72c290ebecf3d 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_miscellaneous_ops.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_miscellaneous_ops.py @@ -812,42 +812,6 @@ def test_torch_diag( ) -# diagflat -@handle_frontend_test( - fn_tree="torch.diagflat", - dtype_and_values=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - min_num_dims=1, - max_num_dims=5, - min_dim_size=1, - max_dim_size=5, - ), - offset=st.integers(min_value=-4, max_value=4), - test_with_out=st.just(False), -) -def test_torch_diagflat( - dtype_and_values, - offset, - test_flags, - backend_fw, - frontend, - fn_tree, - on_device, -): - input_dtype, x = dtype_and_values - helpers.test_frontend_function( - input_dtypes=input_dtype, - frontend=frontend, - backend_to_test=backend_fw, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - test_values=False, - x=x[0], - offset=offset, - ) - - @handle_frontend_test( fn_tree="torch.diagonal", dtype_and_values=helpers.dtype_and_values( diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_vision_functions.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_vision_functions.py index 9c34752b567e5..3e534c3852bb6 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_vision_functions.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_vision_functions.py @@ -112,63 +112,6 @@ def _pad_helper(draw): return dtype, input[0], padding, value, mode -@st.composite -def grid_sample_helper(draw, dtype, mode, mode_3d, padding_mode): - dtype = draw(dtype) - align_corners = draw(st.booleans()) - dims = draw(st.integers(4, 5)) - height = draw(helpers.ints(min_value=5, max_value=10)) - width = draw(helpers.ints(min_value=5, max_value=10)) - channels = draw(helpers.ints(min_value=1, max_value=3)) - - grid_h = draw(helpers.ints(min_value=2, max_value=4)) - grid_w = draw(helpers.ints(min_value=2, max_value=4)) - batch = draw(helpers.ints(min_value=1, max_value=5)) - - padding_mode = draw(st.sampled_from(padding_mode)) - if dims == 4: - mode = draw(st.sampled_from(mode)) - x = draw( - helpers.array_values( - dtype=dtype[0], - shape=[batch, channels, height, width], - min_value=-1, - max_value=1, - ) - ) - - grid = draw( - helpers.array_values( - 
dtype=dtype[0], - shape=[batch, grid_h, grid_w, 2], - min_value=-1, - max_value=1, - ) - ) - elif dims == 5: - mode = draw(st.sampled_from(mode_3d)) - depth = draw(helpers.ints(min_value=10, max_value=15)) - grid_d = draw(helpers.ints(min_value=5, max_value=10)) - x = draw( - helpers.array_values( - dtype=dtype[0], - shape=[batch, channels, depth, height, width], - min_value=-1, - max_value=1, - ) - ) - - grid = draw( - helpers.array_values( - dtype=dtype[0], - shape=[batch, grid_d, grid_h, grid_w, 3], - min_value=-1, - max_value=1, - ) - ) - return dtype, x, grid, mode, padding_mode, align_corners - - # --- Main --- # # ------------ # @@ -201,40 +144,6 @@ def test_torch_affine_grid( ) -@handle_frontend_test( - fn_tree="torch.nn.functional.grid_sample", - dtype_x_grid_modes=grid_sample_helper( - dtype=helpers.get_dtypes("valid", full=False), - mode=["nearest", "bilinear", "bicubic"], - mode_3d=["nearest", "bilinear"], - padding_mode=["border", "zeros", "reflection"], - ), -) -def test_torch_grid_sample( - *, - dtype_x_grid_modes, - on_device, - backend_fw, - fn_tree, - frontend, - test_flags, -): - dtype, x, grid, mode, padding_mode, align_corners = dtype_x_grid_modes - helpers.test_frontend_function( - input_dtypes=dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - input=x, - grid=grid, - mode=mode, - padding_mode=padding_mode, - align_corners=align_corners, - ) - - @handle_frontend_test( fn_tree="torch.nn.functional.interpolate", dtype_and_input_and_other=_interp_args( @@ -420,7 +329,7 @@ def test_torch_upsample_bilinear( test_flags, backend_fw, ): - input_dtype, x, _, size, _, scale_factor, _ = dtype_and_input_and_other + input_dtype, x, _, size, _ = dtype_and_input_and_other helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, @@ -430,7 +339,6 @@ def test_torch_upsample_bilinear( on_device=on_device, input=x[0], size=size, - scale_factor=scale_factor, ) diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py index 0577ad06d862b..a502749812473 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_pointwise_ops.py @@ -2153,30 +2153,6 @@ def test_torch_mul( ) -# mvlgamma -@handle_frontend_test( - fn_tree="torch.mvlgamma", - dtype_and_input=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float") - ), - p=helpers.ints(min_value=1, max_value=11), -) -def test_torch_mvlgamma( - *, dtype_and_input, frontend, test_flags, fn_tree, backend_fw, on_device, p -): - input_dtype, input = dtype_and_input - helpers.test_frontend_function( - input_dtypes=input_dtype, - backend_to_test=backend_fw, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - input=input[0], - p=p, - ) - - @handle_frontend_test( fn_tree="torch.nan_to_num", dtype_and_x=helpers.dtype_and_values( diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py index 3dd529381668b..c9505a8e5407e 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py @@ -43,7 +43,7 @@ def _get_axis_and_p(draw, kind="valid"): ) input_dtype, x, axis = dtype_x_axis - if type(input_dtype[0]) == str: # noqa: E721 + if type(input_dtype[0]) == str: 
if "complex" in input_dtype[0]: kind = "complex" if "float" in input_dtype[0]: @@ -61,7 +61,7 @@ def _get_axis_and_p(draw, kind="valid"): else: dtype = input_dtype[0] - return p, input_dtype, x, axis, dtype + return p, dtype_x_axis, dtype # --- Main --- # diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py index 53c056b9ae711..04e3b112fbcef 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py @@ -288,6 +288,16 @@ def _get_dtype_and_multiplicative_matrices(draw): ) +@st.composite +def _get_dtype_and_multiplicative_matrices(draw): + return draw( + st.one_of( + _get_dtype_input_and_matrices(), + _get_dtype_and_3dbatch_matrices(), + ) + ) + + @st.composite def _get_dtype_input_and_vectors(draw, with_input=False, same_size=False): dim_size1 = draw(helpers.ints(min_value=2, max_value=5)) @@ -1486,41 +1496,6 @@ def test_torch_baddbmm_( ) -# char -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="char", - dtype_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - min_value=-128, - max_value=127, - ), -) -def test_torch_char( - dtype_x, - frontend, - frontend_method_data, - init_flags, - method_flags, - on_device, - backend_fw, -): - input_dtype, x = dtype_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # index_fill @handle_frontend_method( class_tree=CLASS_TREE, @@ -1604,42 +1579,6 @@ def test_torch_instance_sinc( ) -# sinc_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="sinc_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), -) -def test_torch_instance_sinc_( - *, - dtype_and_x, - frontend, - backend_fw, - frontend_method_data, - init_flags, - method_flags, - on_device, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - backend_to_test=backend_fw, - on_device=on_device, - ) - - # isnan @handle_frontend_method( class_tree=CLASS_TREE, @@ -5196,45 +5135,6 @@ def test_torch_tensor_copysign( ) -# copysign_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="copysign_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - min_num_dims=1, - num_arrays=2, - ), -) -def test_torch_tensor_copysign_( - dtype_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "other": x[1], - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # cos 
@handle_frontend_method( class_tree=CLASS_TREE, @@ -5295,7 +5195,7 @@ def test_torch_tensor_cos_( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ - "data": list(x[0]) if isinstance(x[0], int) else x[0], + "data": list(x[0]) if type(x[0]) == int else x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, @@ -5472,48 +5372,6 @@ def test_torch_tensor_cross( ) -# cummax -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="cummax", - dtype_value=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), - ), - dim=helpers.get_axis( - shape=st.shared(helpers.get_shape(), key="shape"), - allow_neg=False, - force_int=True, - ), -) -def test_torch_tensor_cummax( - dtype_value, - dim, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_value - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={"dim": dim}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # cumprod @handle_frontend_method( class_tree=CLASS_TREE, @@ -6283,7 +6141,7 @@ def test_torch_tensor_erf( init_tree="torch.tensor", method_name="erf_", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), + available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_tensor_erf_( @@ -8662,44 +8520,6 @@ def test_torch_tensor_max( ) -# maximum -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="maximum", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - num_arrays=2, - ), -) -def test_torch_tensor_maximum( - dtype_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "other": x[1], - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # mean @handle_frontend_method( class_tree=CLASS_TREE, @@ -9359,44 +9179,6 @@ def test_torch_tensor_neg_( ) -# negative -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="negative", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - min_value=-1e04, - max_value=1e04, - allow_inf=False, - ), -) -def test_torch_tensor_negative( - dtype_and_x, - frontend_method_data, - init_flags, - method_flags, - frontend, - on_device, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # new_empty (not actually intuitive for testing) @handle_frontend_method( class_tree=CLASS_TREE, @@ -10052,50 +9834,6 @@ def 
test_torch_tensor_quantile( ) -# random_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="random_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_integer"), - min_value=1, - max_value=5, - min_num_dims=1, - max_num_dims=5, - ), - to=helpers.ints(min_value=1, max_value=100), -) -def test_torch_tensor_random_( - dtype_and_x, - to, - frontend, - frontend_method_data, - init_flags, - method_flags, - on_device, - backend_fw, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - method_input_dtypes=input_dtype, - frontend_method_data=frontend_method_data, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_all_as_kwargs_np={ - "to": to, - }, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - test_values=False, - ) - - # ravel @handle_frontend_method( class_tree=CLASS_TREE, @@ -12696,48 +12434,6 @@ def test_torch_tensor_where( ) -# xlogy_ -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="xlogy_", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - num_arrays=2, - min_num_dims=1, - min_value=-100, - max_value=100, - shared_dtype=True, - ), -) -def test_torch_tensor_xlogy_( - dtype_and_x, - frontend, - backend_fw, - frontend_method_data, - init_flags, - method_flags, - on_device, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - init_all_as_kwargs_np={ - "data": x[0], - }, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "other": x[1], - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - backend_to_test=backend_fw, - on_device=on_device, - ) - - # zero_ tests @handle_frontend_method( class_tree=CLASS_TREE, @@ -12774,48 +12470,6 @@ def test_torch_tensor_zero_( ) -# triu -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="triu", - dtype_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("numeric"), - min_num_dims=2, - max_num_dims=5, - min_dim_size=1, - max_dim_size=5, - ), - diagonal=st.integers( - min_value=-4, - max_value=4, - ), -) -def test_torch_triu( - dtype_x, - diagonal, - frontend, - frontend_method_data, - init_flags, - method_flags, - on_device, - backend_fw, -): - input_dtype, x = dtype_x - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={"diagonal": diagonal}, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - # triu_ @handle_frontend_method( class_tree=CLASS_TREE, @@ -12856,94 +12510,3 @@ def test_torch_triu_( frontend=frontend, on_device=on_device, ) - - -# unique -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="unique", - dtype_x_axis=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("valid"), - valid_axis=True, - force_int_axis=True, - ), - sorted=st.booleans(), - return_inverse=st.booleans(), - return_counts=st.booleans(), -) -def test_torch_unique( - dtype_x_axis, - sorted, - return_inverse, - return_counts, - frontend, - frontend_method_data, - init_flags, - method_flags, - on_device, - 
backend_fw, -): - input_dtype, x, axis = dtype_x_axis - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "sorted": sorted, - "return_inverse": return_inverse, - "return_counts": return_counts, - "dim": axis, - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) - - -# unique_consecutive -@handle_frontend_method( - class_tree=CLASS_TREE, - init_tree="torch.tensor", - method_name="unique_consecutive", - dtype_x_axis=helpers.dtype_values_axis( - available_dtypes=helpers.get_dtypes("valid"), - min_num_dims=2, - min_dim_size=2, - force_int_axis=True, - valid_axis=True, - ), - return_inverse=st.booleans(), - return_counts=st.booleans(), -) -def test_torch_unique_consecutive( - dtype_x_axis, - return_inverse, - return_counts, - frontend, - frontend_method_data, - init_flags, - method_flags, - on_device, - backend_fw, -): - input_dtype, x, axis = dtype_x_axis - helpers.test_frontend_method( - init_input_dtypes=input_dtype, - backend_to_test=backend_fw, - init_all_as_kwargs_np={"data": x[0]}, - method_input_dtypes=input_dtype, - method_all_as_kwargs_np={ - "return_inverse": return_inverse, - "return_counts": return_counts, - "dim": axis, - }, - frontend_method_data=frontend_method_data, - init_flags=init_flags, - method_flags=method_flags, - frontend=frontend, - on_device=on_device, - ) diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_device.py b/ivy_tests/test_ivy/test_functional/test_core/test_device.py index b4bf40ba7fe16..3cfc21219ab29 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_device.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_device.py @@ -701,7 +701,7 @@ def test_to_device( device[1:].split(":")[-2:] ) elif backend_fw == "torch": - assert type(dev_from_new_x) == type(device) # noqa: E721 + assert type(dev_from_new_x) == type(device) else: assert dev_from_new_x == device diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py b/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py index dddf7963e4f8d..a4f19e0de9668 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py @@ -138,7 +138,7 @@ def cast_filter(dtype1_x1_dtype2): dtype2 = dtype2[0] if "int" in dtype2: x2 = ivy.nested_map( - x2[0], lambda x: abs(x), include_derived={"list": True}, shallow=False + x2[0], lambda x: abs(x), include_derived={list: True}, shallow=False ) return [dtype1, dtype2], [x1, x2] @@ -173,7 +173,7 @@ def test_abs(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="functional.ivy.acos", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=4, small_abs_safety_factor=4, ), @@ -196,11 +196,10 @@ def test_acos(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="functional.ivy.acosh", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), min_value=1, - large_abs_safety_factor=2.1, - small_abs_safety_factor=2.1, - safety_factor_scale="log", + large_abs_safety_factor=4, + small_abs_safety_factor=4, ), ) def test_acosh(*, dtype_and_x, 
test_flags, backend_fw, fn_name, on_device): @@ -288,7 +287,7 @@ def test_angle( @handle_test( fn_tree="functional.ivy.asin", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=4, small_abs_safety_factor=4, ), @@ -311,7 +310,7 @@ def test_asin(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="functional.ivy.asinh", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=4, small_abs_safety_factor=4, ), @@ -333,12 +332,7 @@ def test_asinh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # atan @handle_test( fn_tree="functional.ivy.atan", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - large_abs_safety_factor=2, - small_abs_safety_factor=2, - safety_factor_scale="log", - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_atan(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -385,9 +379,7 @@ def test_atan2(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # atanh @handle_test( fn_tree="functional.ivy.atanh", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_atanh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -591,9 +583,7 @@ def test_ceil(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # cos @handle_test( fn_tree="functional.ivy.cos", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_cos(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -610,9 +600,7 @@ def test_cos(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # cosh @handle_test( fn_tree="functional.ivy.cosh", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_cosh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -718,9 +706,7 @@ def test_erf(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # exp @handle_test( fn_tree="functional.ivy.exp", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_exp(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -738,7 +724,7 @@ def test_exp(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="functional.ivy.exp2", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), min_value=-10, max_value=10, min_num_dims=1, @@ -763,13 +749,7 @@ def test_exp2(dtype_and_x, test_flags, backend_fw, fn_name, on_device): # expm1 @handle_test( fn_tree="functional.ivy.expm1", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - 
# Can't use linear or log safety factor, since the function is exponential, - # next best option is a hardcoded maximum that won't break any data type. - # expm1 is designed for very small values anyway - max_value=20.0, - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_expm1(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -1184,10 +1164,7 @@ def test_less_equal(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # log @handle_test( fn_tree="functional.ivy.log", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - safety_factor_scale="log", - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_log(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -1206,10 +1183,7 @@ def test_log(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # log10 @handle_test( fn_tree="functional.ivy.log10", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - safety_factor_scale="log", - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_log10(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -1231,9 +1205,8 @@ def test_log10(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="functional.ivy.log1p", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), small_abs_safety_factor=2, - large_abs_safety_factor=2.1, safety_factor_scale="log", ), ) @@ -1254,10 +1227,7 @@ def test_log1p(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # log2 @handle_test( fn_tree="functional.ivy.log2", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - safety_factor_scale="log", - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_log2(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -1768,9 +1738,7 @@ def test_sign(*, dtype_and_x, np_variant, test_flags, backend_fw, fn_name, on_de # sin @handle_test( fn_tree="functional.ivy.sin", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_sin(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -1787,9 +1755,7 @@ def test_sin(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # sinh @handle_test( fn_tree="functional.ivy.sinh", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_sinh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -1807,13 +1773,7 @@ def test_sinh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="functional.ivy.sqrt", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), - allow_inf=False, - # Safety factor is to account for complex, where taking square root - # involves taking absolute value first - large_abs_safety_factor=2, - small_abs_safety_factor=2, 
- safety_factor_scale="log", + available_dtypes=helpers.get_dtypes("float"), allow_inf=False ).filter(lambda x: x[0][0] not in ["bfloat16"]), ) def test_sqrt(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @@ -1882,9 +1842,7 @@ def test_subtract(*, dtype_and_x, alpha, test_flags, backend_fw, fn_name, on_dev # tan @handle_test( fn_tree="functional.ivy.tan", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_tan(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x @@ -1903,12 +1861,9 @@ def test_tan(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): # tanh @handle_test( fn_tree="functional.ivy.tanh", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex") - ), - complex_mode=st.sampled_from(["jax", "split", "magnitude"]), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) -def test_tanh(*, dtype_and_x, complex_mode, test_flags, backend_fw, fn_name, on_device): +def test_tanh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_and_x helpers.test_function( input_dtypes=input_dtype, @@ -1919,7 +1874,6 @@ def test_tanh(*, dtype_and_x, complex_mode, test_flags, backend_fw, fn_name, on_ rtol_=1e-1, atol_=1e-2, x=x[0], - complex_mode=complex_mode, ) diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_general.py b/ivy_tests/test_ivy/test_functional/test_core/test_general.py index 65a1114e4c653..1c4aa8fe876a6 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_general.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_general.py @@ -118,16 +118,6 @@ def _isin_data_generation_helper(draw): return assume_unique, draw(dtype_and_x) -def _supports_inplace_update(ivy_backend, test_flags) -> bool: - supports_array_inplace_update = ( - not test_flags.as_variable and ivy_backend.inplace_arrays_supported() - ) - supports_variable_inplace_update = ( - test_flags.as_variable and ivy_backend.inplace_variables_supported() - ) - return supports_array_inplace_update or supports_variable_inplace_update - - # fourier_encode # @given( # x=helpers.dtype_and_values(ivy_np.valid_float_dtypes, min_num_dims=1), @@ -1232,8 +1222,9 @@ def test_inplace_decrement(x_val_and_dtypes, test_flags, on_device, backend_fw): x = ivy_backend.array(x, dtype=dtype, device=on_device) val = ivy_backend.array(val, dtype=dtype, device=on_device) new_val = x - val - supports_update = _supports_inplace_update(ivy_backend, test_flags) - if supports_update: + if (not test_flags.as_variable and ivy_backend.inplace_arrays_supported()) or ( + test_flags.as_variable and ivy_backend.inplace_variables_supported() + ): x_inplace = ivy_backend.inplace_decrement(x, val) assert id(x_inplace) == id(x) x = helpers.flatten_and_to_np(ret=x, backend=backend_fw) @@ -1268,8 +1259,9 @@ def test_inplace_increment(x_val_and_dtypes, test_flags, on_device, backend_fw): x = ivy_backend.array(x, dtype=dtype, device=on_device) val = ivy_backend.array(val, dtype=dtype, device=on_device) new_val = x + val - supports_update = _supports_inplace_update(ivy_backend, test_flags) - if supports_update: + if (not test_flags.as_variable and ivy_backend.inplace_arrays_supported()) or ( + test_flags.as_variable and ivy_backend.inplace_variables_supported() + ): x_inplace = ivy_backend.inplace_increment(x, val) assert id(x_inplace) == id(x) 
x = helpers.flatten_and_to_np(ret=x, backend=backend_fw) @@ -1288,10 +1280,9 @@ def test_inplace_increment(x_val_and_dtypes, test_flags, on_device, backend_fw): shared_dtype=True, ), keep_x_dtype=st.booleans(), - inplace_mode=st.sampled_from(["lenient", "strict"]), ) def test_inplace_update( - x_val_and_dtypes, keep_x_dtype, inplace_mode, test_flags, on_device, backend_fw + x_val_and_dtypes, keep_x_dtype, test_flags, on_device, backend_fw ): with BackendHandler.update_backend(backend_fw) as ivy_backend: dtype = x_val_and_dtypes[0][0] @@ -1300,10 +1291,9 @@ def test_inplace_update( x, val = x_val_and_dtypes[1] x = ivy_backend.array(x.tolist(), dtype=dtype, device=on_device) val = ivy_backend.array(val.tolist(), dtype=dtype, device=on_device) - - ivy_backend.set_inplace_mode(inplace_mode) - supports_update = _supports_inplace_update(ivy_backend, test_flags) - if supports_update or ivy_backend.inplace_mode == "lenient": + if (not test_flags.as_variable and ivy_backend.inplace_arrays_supported()) or ( + test_flags.as_variable and ivy_backend.inplace_variables_supported() + ): if keep_x_dtype: x_dtype = x.dtype x_inplace = ivy_backend.inplace_update(x, val, keep_input_dtype=True) @@ -1316,9 +1306,6 @@ def test_inplace_update( helpers.value_test( backend=backend_fw, ret_np_flat=x, ret_np_from_gt_flat=val ) - elif not supports_update and ivy_backend.inplace_mode == "strict": - with pytest.raises(ivy.utils.exceptions.InplaceUpdateException): - ivy_backend.inplace_update(x, val) def test_inplace_variables_supported(backend_fw): @@ -1621,10 +1608,10 @@ def test_scatter_nd(x, reduction, test_flags, backend_fw, fn_name, on_device): # ------# -@pytest.mark.parametrize("mode", ["lenient", "strict"]) -def test_set_inplace_mode(mode): - ivy.set_inplace_mode(mode) - assert ivy.inplace_mode == mode +@given(fw_str=st.sampled_from(["numpy", "jax", "torch", "tensorflow"])) +def test_set_framework(fw_str): + ivy.set_backend(fw_str) + ivy.previous_backend() # set_item @@ -1656,6 +1643,7 @@ def test_set_item( on_device=on_device, backend_to_test=backend_fw, fn_name=fn_name, + rtol_=1e-03, # needed only for the paddle backend x=x, query=query, val=val, @@ -1904,13 +1892,6 @@ def test_try_else_none(x): assert fn is None -@pytest.mark.parametrize("mode", ["lenient", "strict"]) -def test_unset_inplace_mode(mode): - ivy.set_inplace_mode(mode) - ivy.unset_inplace_mode() - assert ivy.inplace_mode == "lenient" - - def test_use_within_use_framework(): with ivy.functional.backends.numpy.use: pass diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py b/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py index 6ae42c6de5ca6..f3564e10e7317 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py @@ -1,6 +1,7 @@ """Collection of tests for manipulation functions.""" # global + import numpy as np from hypothesis import strategies as st, assume @@ -66,61 +67,6 @@ def _basic_min_x_max(draw): return [dtype], (value[0], min_val, max_val) -@st.composite -def _broadcastable_arrays(draw): - shapes = draw(helpers.mutually_broadcastable_shapes(num_shapes=3)) - dtypes, values = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), shape=shapes[0] - ) - ) - min_val = draw( - st.one_of( - st.floats(-5, 5), - st.just(None), - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), shape=shapes[1] - ), - ) - ) - max_val = draw( - st.one_of( - st.floats(-5, 5), - 
st.just(None), - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), shape=shapes[2] - ), - ) - ) - if min_val is None and max_val is None: - generate_max = draw(st.booleans()) - if generate_max: - max_val = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), shape=shapes[2] - ) - ) - else: - min_val = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), shape=shapes[1] - ) - ) - if min_val is not None: - if not isinstance(min_val, float): - dtypes.append(min_val[0][0]) - min_val = min_val[1][0] - else: - dtypes.append(ivy.float32) - if max_val is not None: - if not isinstance(max_val, float): - dtypes.append(max_val[0][0]) - max_val = max_val[1][0] - else: - dtypes.append(ivy.float32) - return dtypes, values[0], min_val, max_val - - @st.composite def _constant_pad_helper(draw): dtype, value, shape = draw( @@ -278,12 +224,12 @@ def _stack_helper(draw): # clip @handle_test( fn_tree="functional.ivy.clip", - dtype_x_min_max=_broadcastable_arrays(), + dtype_x_min_max=_basic_min_x_max(), ) def test_clip(*, dtype_x_min_max, test_flags, backend_fw, fn_name, on_device): - dtypes, x_list, min_val, max_val = dtype_x_min_max + dtypes, (x_list, min_val, max_val) = dtype_x_min_max helpers.test_function( - input_dtypes=dtypes, + input_dtypes=dtypes[0], test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, @@ -636,28 +582,6 @@ def test_squeeze(*, dtype_value, axis, test_flags, backend_fw, fn_name, on_devic ) -@handle_test( - fn_tree="functional.ivy.stack", - dtypes_arrays=_stack_helper(), - axis=helpers.get_axis( - shape=st.shared(helpers.get_shape(min_num_dims=1), key="values_shape"), - force_int=True, - ), -) -def test_stack(*, dtypes_arrays, axis, test_flags, backend_fw, fn_name, on_device): - dtypes, arrays = dtypes_arrays - - helpers.test_function( - input_dtypes=dtypes, - test_flags=test_flags, - backend_to_test=backend_fw, - fn_name=fn_name, - on_device=on_device, - arrays=arrays, - axis=axis, - ) - - # stack @handle_test( fn_tree="functional.ivy.stack", diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py index ab63755d69bcb..e132bb235e375 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py @@ -292,46 +292,6 @@ def test_kaiser_window( ) -# mel_weight_matrix -@handle_test( - fn_tree="functional.ivy.experimental.mel_weight_matrix", - num_mel_bins=helpers.ints(min_value=5, max_value=10), - dft_length=helpers.ints(min_value=5, max_value=10), - sample_rate=helpers.ints(min_value=1000, max_value=2000), - lower_edge_hertz=helpers.floats(min_value=0.0, max_value=5.0), - upper_edge_hertz=helpers.floats(min_value=5.0, max_value=10.0), - test_with_out=st.just(False), - test_gradients=st.just(False), - test_instance_method=st.just(False), -) -def test_mel_weight_matrix( - *, - num_mel_bins, - dft_length, - sample_rate, - lower_edge_hertz, - upper_edge_hertz, - test_flags, - backend_fw, - fn_name, - on_device, -): - helpers.test_function( - input_dtypes=[], - test_flags=test_flags, - on_device=on_device, - backend_to_test=backend_fw, - rtol_=0.05, - atol_=0.05, - fn_name=fn_name, - num_mel_bins=num_mel_bins, - dft_length=dft_length, - sample_rate=sample_rate, - lower_edge_hertz=lower_edge_hertz, - upper_edge_hertz=upper_edge_hertz, - ) - - # ndenumerate @handle_test( 
fn_tree="functional.ivy.experimental.ndenumerate", @@ -593,6 +553,33 @@ def test_unsorted_segment_sum( ) +@handle_test( + fn_tree="functional.ivy.experimental.unsorted_segment_sum", + d_x_n_s=valid_unsorted_segment_min_inputs(), + test_with_out=st.just(False), + test_gradients=st.just(False), +) +def test_unsorted_segment_sum( + *, + d_x_n_s, + test_flags, + backend_fw, + fn_name, + on_device, +): + dtypes, data, num_segments, segment_ids = d_x_n_s + helpers.test_function( + input_dtypes=dtypes, + test_flags=test_flags, + on_device=on_device, + fw=backend_fw, + fn_name=fn_name, + data=data, + segment_ids=segment_ids, + num_segments=num_segments, + ) + + # vorbis_window @handle_test( fn_tree="functional.ivy.experimental.vorbis_window", diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py index b1b01c92b0a53..ffdd059f5d1ca 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py @@ -141,51 +141,6 @@ def _generate_diag_args(draw): return dtype_x, offset, dtype_padding_value, align, num_rows, num_cols -# dot -@st.composite -def _generate_dot_dtype_and_arrays(draw): - shape_a = draw( - helpers.get_shape( - min_dim_size=2, max_dim_size=5, min_num_dims=0, max_num_dims=5 - ) - ) - shape_b = draw( - helpers.get_shape( - min_dim_size=2, max_dim_size=5, min_num_dims=0, max_num_dims=5 - ) - ) - - shape_a = list(shape_a) - shape_b = list(shape_b) - if len(shape_a) == 1 and len(shape_b) == 1: - shape_b[0] = shape_a[0] - elif len(shape_a) == 2 and len(shape_b) == 2: - shape_b[0] = shape_a[1] - elif len(shape_a) >= 2 and len(shape_b) == 1: - shape_b[0] = shape_a[-1] - elif len(shape_a) >= 1 and len(shape_b) >= 2: - shape_a[-1] = shape_b[-2] - - dtype_1, a = draw( - helpers.dtype_and_values( - shape=shape_a, - available_dtypes=helpers.get_dtypes("float"), - min_value=-10, - max_value=10, - ) - ) - dtype_2, b = draw( - helpers.dtype_and_values( - shape=shape_b, - dtype=dtype_1, - min_value=-10, - max_value=10, - ) - ) - - return [dtype_1[0], dtype_2[0]], [a[0], b[0]] - - @st.composite def _generate_eigh_tridiagonal_args(draw): dtype, alpha = draw( @@ -233,25 +188,6 @@ def _generate_eigh_tridiagonal_args(draw): return dtype, alpha, beta, eigvals_only, select, select_range, tol -@st.composite -def _generate_general_inner_product_args(draw): - dim = draw(st.integers(min_value=1, max_value=3)) - x_dtype, x = draw( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - shape=(dim, dim), - min_value=1, - max_value=10.0, - num_arrays=2, - shared_dtype=True, - allow_nan=False, - ) - ) - max_value = dim - 1 if dim > 1 else dim - n_modes = draw(st.integers(min_value=1, max_value=max_value) | st.just(None)) - return x_dtype, x, n_modes - - # multi_dot @st.composite def _generate_multi_dot_dtype_and_arrays(draw): @@ -852,27 +788,6 @@ def test_diagflat(*, test_flags, backend_fw, fn_name, args_packet, on_device): ) -@handle_test( - fn_tree="functional.ivy.experimental.dot", - data=_generate_dot_dtype_and_arrays(), -) -def test_dot(*, data, test_flags, backend_fw, fn_name, on_device): - (input_dtypes, x) = data - return helpers.test_function( - backend_to_test=backend_fw, - test_flags=test_flags, - fn_name=fn_name, - on_device=on_device, - xs_grad_idxs=[[0, 0]], - input_dtypes=input_dtypes, - test_values=True, - rtol_=0.5, - atol_=0.5, - a=x[0], - b=x[1], - ) - - 
@handle_test( fn_tree="functional.ivy.experimental.eig", dtype_x=helpers.dtype_and_values( @@ -1014,26 +929,6 @@ def test_eigvals(dtype_x, test_flags, backend_fw, fn_name): ) -@handle_test( - fn_tree="functional.ivy.experimental.general_inner_product", - data=_generate_general_inner_product_args(), -) -def test_general_inner_product(*, data, test_flags, backend_fw, fn_name, on_device): - input_dtypes, x, n_modes = data - helpers.test_function( - backend_to_test=backend_fw, - test_flags=test_flags, - fn_name=fn_name, - on_device=on_device, - rtol_=1e-1, - atol_=1e-1, - input_dtypes=input_dtypes, - a=x[0], - b=x[1], - n_modes=n_modes, - ) - - @handle_test( fn_tree="functional.ivy.experimental.initialize_tucker", data=_initialize_tucker_data(), diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_manipulation.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_manipulation.py index 234c75451f748..6f964a658cf1e 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_manipulation.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_manipulation.py @@ -246,11 +246,14 @@ def _pad_helper(draw): ), min_size=ndim, max_size=ndim, - ).filter( - lambda x: all(shape[i] + x[i][0] + x[i][1] >= 0 for i in range(ndim)) ) ) - constant_values = draw(helpers.number(min_value=0, max_value=100)) + constant_values = draw( + helpers.number( + min_value=0, + max_value=100, + ).filter(lambda _x: ivy.as_ivy_dtype(type(_x)) == dtype[0]) + ) else: pad_width = draw(_st_tuples_or_int(ndim)) constant_values = draw(_st_tuples_or_int(ndim)) diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sparse_array.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sparse_array.py index de62bbd2be3df..8082dc60733ce 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sparse_array.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sparse_array.py @@ -2,8 +2,6 @@ from hypothesis import strategies as st # local -import ivy -import numpy as np import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_method @@ -180,44 +178,6 @@ def _sparse_csr_indices_values_shape(draw): # ------------ # -# adding sparse array to dense array -@handle_method( - init_tree="ivy.array", - method_tree="Array.__add__", - sparse_data=_sparse_coo_indices_values_shape(), -) -def test_array_add_sparse( - sparse_data, - method_name, - class_name, - on_device, -): - coo_ind, val_dtype, val, shp = sparse_data - - # set backed to 'torch' as this is the only backend which supports sparse arrays - ivy.set_backend("torch") - - # initiate a sparse array - sparse_inst = ivy.sparse_array.SparseArray( - coo_indices=coo_ind, - values=val, - dense_shape=shp, - format="coo", - ) - - # create an Array instance - array_class = getattr(ivy, class_name) - x = np.random.random_sample(shp) - x = ivy.array(x, dtype=val_dtype, device=on_device) - - # call add method - add_method = getattr(array_class, method_name) - res = add_method(x, sparse_inst) - - # make sure the result is an Array instance - assert isinstance(res, array_class) - - # bsc - to_dense_array @handle_method( method_tree="SparseArray.to_dense_array", diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_statistical.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_statistical.py index 716ff0ae7353c..f43f00c3f4526 100644 --- 
a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_statistical.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_statistical.py @@ -15,30 +15,6 @@ # --------------- # -@st.composite -def _get_castable_float_dtype_nan(draw, min_value=None, max_value=None): - available_dtypes = helpers.get_dtypes("float") - shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=4, max_dim_size=6)) - dtype, values = draw( - helpers.dtype_and_values( - available_dtypes=available_dtypes, - num_arrays=1, - large_abs_safety_factor=6, - small_abs_safety_factor=24, - safety_factor_scale="log", - shape=shape, - min_value=min_value, - max_value=max_value, - allow_nan=True, - ) - ) - axis = draw(helpers.get_axis(shape=shape, force_int=True)) - dtype1, values, dtype2 = draw( - helpers.get_castable_dtype(draw(available_dtypes), dtype[0], values[0]) - ) - return dtype1, [values], axis, dtype2 - - @st.composite def _get_dtype_value1_value2_cov( draw, @@ -263,27 +239,17 @@ def _quantile_helper(draw): ) ) q = draw( - st.one_of( - helpers.array_values( - dtype=helpers.get_dtypes("float"), - shape=helpers.get_shape(min_dim_size=1, max_num_dims=1, min_num_dims=1), - min_value=0.0, - max_value=1.0, - exclude_max=False, - exclude_min=False, - ), - st.floats(min_value=0.0, max_value=1.0), + helpers.array_values( + dtype=helpers.get_dtypes("float"), + shape=helpers.get_shape(min_dim_size=1, max_num_dims=1, min_num_dims=1), + min_value=0.0, + max_value=1.0, + exclude_max=False, + exclude_min=False, ) ) - interpolation_names = [ - "linear", - "lower", - "higher", - "midpoint", - "nearest", - "nearest_jax", - ] + interpolation_names = ["linear", "lower", "higher", "midpoint", "nearest"] interpolation = draw( helpers.list_of_size( x=st.sampled_from(interpolation_names), @@ -453,8 +419,9 @@ def test_cummax( atol_=1e-1, ) + # cummin + -# cummin @handle_test( fn_tree="functional.ivy.experimental.cummin", dtype_x_axis_castable=_get_castable_dtype(), @@ -646,41 +613,6 @@ def test_nanmedian( ) -@handle_test( - fn_tree="functional.ivy.experimental.nanprod", - dtype_x_axis_castable=_get_castable_float_dtype_nan(), - keep_dims=st.booleans(), - test_gradients=st.just(False), - initial=st.integers(min_value=-5, max_value=5), -) -def test_nanprod( - *, - dtype_x_axis_castable, - keep_dims, - test_flags, - initial, - backend_fw, - fn_name, - on_device, -): - input_dtype, x, axis, castable_dtype = dtype_x_axis_castable - x = x[0] - helpers.test_function( - input_dtypes=[input_dtype], - test_flags=test_flags, - rtol_=1e-1, - atol_=1e-1, - backend_to_test=backend_fw, - fn_name=fn_name, - on_device=on_device, - a=x, - axis=axis, - keepdims=keep_dims, - dtype=castable_dtype, - initial=initial, - ) - - # quantile @handle_test( fn_tree="functional.ivy.experimental.quantile", @@ -704,6 +636,4 @@ def test_quantile( axis=axis, interpolation=interpolation[0], keepdims=keep_dims, - atol_=1e-3, - rtol_=1e-3, ) diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_activations.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_activations.py index 7c213994ed770..f940f291a7714 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_activations.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_activations.py @@ -44,7 +44,7 @@ def test_elu( @handle_test( fn_tree="functional.ivy.experimental.logit", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + 
available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=8, small_abs_safety_factor=8, safety_factor_scale="log", @@ -89,7 +89,7 @@ def test_logsigmoid(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="prelu", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), + available_dtypes=helpers.get_dtypes("float"), shape=st.shared(helpers.get_shape(), key="prelu"), large_abs_safety_factor=8, small_abs_safety_factor=8, @@ -117,7 +117,7 @@ def test_prelu(*, dtype_and_x, slope, test_flags, backend_fw, fn_name, on_device @handle_test( fn_tree="functional.ivy.experimental.relu6", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), + available_dtypes=helpers.get_dtypes("numeric"), large_abs_safety_factor=2, small_abs_safety_factor=2, safety_factor_scale="log", diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_layers.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_layers.py index 4c963d7bdd711..1536aa2be124c 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_layers.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_layers.py @@ -170,7 +170,7 @@ def _interp_args(draw, mode=None, mode_list=None): @st.composite def _reduce_window_helper(draw, get_func_st): - dtype = draw(helpers.get_dtypes("valid", full=False, index=2)) + dtype = draw(helpers.get_dtypes("valid", full=False)) py_func = draw(get_func_st(dtype[0])) init_value = draw( helpers.dtype_and_values( @@ -911,8 +911,8 @@ def test_fft2(*, d_x_d_s_n, test_flags, backend_fw, fn_name, on_device): backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, - rtol_=1e-2, - atol_=1e-2, + # rtol_=1e-2, + # atol_=1e-2, x=x, s=s, dim=dim, diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py index c41b3f0d2d63c..2542c2495d4e4 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py @@ -140,7 +140,7 @@ def test_log_poisson_loss( helpers.test_function( input_dtypes=targets_dtype + log_input_dtype, test_flags=test_flags, - backend_to_test=backend_fw, + backend_to_fix=backend_fw, fn_name=fn_name, on_device=on_device, targets=targets[0], @@ -200,52 +200,3 @@ def test_smooth_l1_loss( beta=beta, reduction=reduction, ) - - -# soft_margin_loss -@handle_test( - fn_tree="functional.ivy.experimental.soft_margin_loss", - dtype_and_input=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - min_value=1e-04, - max_value=1, - allow_inf=False, - min_num_dims=1, - max_num_dims=3, - min_dim_size=3, - ), - dtype_and_target=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - min_value=1e-04, - max_value=1, - allow_inf=False, - min_num_dims=1, - max_num_dims=3, - min_dim_size=3, - ), - reduction=st.sampled_from(["none", "sum", "mean"]), -) -def test_soft_margin_loss( - dtype_and_input, - dtype_and_target, - reduction, - test_flags, - backend_fw, - fn_name, - on_device, -): - input_dtype, input = dtype_and_input - target_dtype, target = dtype_and_target - - helpers.test_function( - input_dtypes=input_dtype + target_dtype, - test_flags=test_flags, - backend_to_test=backend_fw, - fn_name=fn_name, - on_device=on_device, - rtol_=1e-02, - atol_=1e-02, - pred=input[0], - target=target[0], - 
reduction=reduction, - ) diff --git a/ivy_tests/test_ivy/test_functional/test_nn/test_activations.py b/ivy_tests/test_ivy/test_functional/test_nn/test_activations.py index ac23f384b6398..4aa174c565573 100644 --- a/ivy_tests/test_ivy/test_functional/test_nn/test_activations.py +++ b/ivy_tests/test_ivy/test_functional/test_nn/test_activations.py @@ -13,23 +13,18 @@ fn_tree="functional.ivy.gelu", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_complex"), + large_abs_safety_factor=1, + small_abs_safety_factor=1, + safety_factor_scale="linear", min_value=-1e4, max_value=1e4, ), approximate=st.booleans(), - complex_mode=st.sampled_from(["jax", "split", "magnitude"]), ) -def test_gelu( - *, - dtype_and_x, - approximate, - complex_mode, - test_flags, - backend_fw, - fn_name, - on_device, -): +def test_gelu(*, dtype_and_x, approximate, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_and_x + if "complex" in str(x[0].dtype): + approximate = True helpers.test_function( input_dtypes=dtype, backend_to_test=backend_fw, @@ -40,7 +35,6 @@ def test_gelu( rtol_=1e-2, x=x[0], approximate=approximate, - complex_mode=complex_mode, ) @@ -85,11 +79,8 @@ def test_hardswish( safety_factor_scale="log", ), alpha=st.floats(min_value=-1e-4, max_value=1e-4), - complex_mode=st.sampled_from(["jax", "split", "magnitude"]), ) -def test_leaky_relu( - *, dtype_and_x, alpha, complex_mode, test_flags, backend_fw, fn_name, on_device -): +def test_leaky_relu(*, dtype_and_x, alpha, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_and_x helpers.test_function( input_dtypes=dtype, @@ -101,7 +92,6 @@ def test_leaky_relu( atol_=1e-2, x=x[0], alpha=alpha, - complex_mode=complex_mode, ) @@ -165,9 +155,8 @@ def test_mish(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): small_abs_safety_factor=8, safety_factor_scale="log", ), - complex_mode=st.sampled_from(["jax", "split", "magnitude"]), ) -def test_relu(*, dtype_and_x, complex_mode, test_flags, backend_fw, fn_name, on_device): +def test_relu(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_and_x helpers.test_function( input_dtypes=dtype, @@ -176,7 +165,6 @@ def test_relu(*, dtype_and_x, complex_mode, test_flags, backend_fw, fn_name, on_ fn_name=fn_name, on_device=on_device, x=x[0], - complex_mode=complex_mode, ) @@ -208,10 +196,10 @@ def test_sigmoid(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device): @handle_test( fn_tree="functional.ivy.softmax", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float_and_complex"), + available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, large_abs_safety_factor=8, - small_abs_safety_factor=4, + small_abs_safety_factor=8, safety_factor_scale="log", ), axis=st.one_of( @@ -246,18 +234,9 @@ def test_softmax(*, dtype_and_x, axis, test_flags, backend_fw, fn_name, on_devic ), beta=st.one_of(helpers.number(min_value=0.1, max_value=10), st.none()), threshold=st.one_of(helpers.number(min_value=0.1, max_value=30), st.none()), - complex_mode=st.sampled_from(["jax", "split", "magnitude"]), ) def test_softplus( - *, - dtype_and_x, - beta, - threshold, - complex_mode, - test_flags, - backend_fw, - fn_name, - on_device, + *, dtype_and_x, beta, threshold, test_flags, backend_fw, fn_name, on_device ): assume(beta != 0) assume(threshold != 0) @@ -273,5 +252,4 @@ def test_softplus( x=x[0], beta=beta, threshold=threshold, - complex_mode=complex_mode, ) diff --git a/ivy_tests/test_ivy/test_stateful/test_activations.py 
b/ivy_tests/test_ivy/test_stateful/test_activations.py
index 3630421a1b7cc..f9bf437ae759b 100644
--- a/ivy_tests/test_ivy/test_stateful/test_activations.py
+++ b/ivy_tests/test_ivy/test_stateful/test_activations.py
@@ -288,7 +288,7 @@ def test_log_softmax(
 @handle_method(
 method_tree="stateful.activations.Logit.__call__",
 dtype_and_x=helpers.dtype_and_values(
- available_dtypes=helpers.get_dtypes("float_and_complex"),
+ available_dtypes=helpers.get_dtypes("float"),
 large_abs_safety_factor=8,
 small_abs_safety_factor=8,
 safety_factor_scale="log",
@@ -671,10 +671,10 @@ def test_silu(
 @handle_method(
 method_tree="stateful.activations.Softmax.__call__",
 dtype_and_x=helpers.dtype_and_values(
- available_dtypes=helpers.get_dtypes("float_and_complex"),
+ available_dtypes=helpers.get_dtypes("float"),
 min_num_dims=1,
- large_abs_safety_factor=10,
- small_abs_safety_factor=10,
+ large_abs_safety_factor=8,
+ small_abs_safety_factor=8,
 safety_factor_scale="log",
 ),
 axis=helpers.ints(min_value=-1, max_value=0),
diff --git a/ivy_tests/test_ivy/test_stateful/test_layers.py b/ivy_tests/test_ivy/test_stateful/test_layers.py
index fbe572a379243..6326496eda3dd 100644
--- a/ivy_tests/test_ivy/test_stateful/test_layers.py
+++ b/ivy_tests/test_ivy/test_stateful/test_layers.py
@@ -17,7 +17,7 @@
 test_layers as exp_layers_tests,
 )
 from ivy_tests.test_ivy.test_functional.test_experimental.test_nn.test_layers import (
 _valid_dct,
 )
 all_constant_initializers = (ivy.Zeros, ivy.Ones)
@@ -893,7 +893,7 @@ def test_conv3d_transpose_layer(
 @handle_method(
 method_tree="Dct.__call__",
 dtype_x_and_args=_valid_dct(),
 )
 def test_dct(
 *,
@@ -1115,7 +1115,7 @@ def test_embedding_layer(
 # FFT
 @handle_method(
 method_tree="FFT.__call__",
 x_and_fft=exp_layers_tests._x_and_fft(),
 )
 def test_fft_layer(
 *,
diff --git a/ivy_tests/test_ivy/test_stateful/test_modules.py b/ivy_tests/test_ivy/test_stateful/test_modules.py
index 84c801221ee81..b84aee7c64b6d 100644
--- a/ivy_tests/test_ivy/test_stateful/test_modules.py
+++ b/ivy_tests/test_ivy/test_stateful/test_modules.py
@@ -1018,7 +1018,7 @@ def test_train_eval(mode):
 cls.train(mode)
 assert mode == cls.training
 cls.eval()
 assert not cls.training
# v with top v key chains