diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
index 0ebe6c16043d..76072258dcf7 100644
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -234,6 +234,26 @@ def clip(self, min=None, max=None, name=None):
         ret = ivy.clip(self._ivy_array, min, max)
         return paddle_frontend.Tensor(ret)

+    @with_supported_dtypes(
+        {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
+    )
+    def clip_(self, min=None, max=None, name=None):
+        ivy.utils.assertions.check_all_or_any_fn(
+            min,
+            max,
+            fn=ivy.exists,
+            type="any",
+            limit=[1, 2],
+            message="at most one of min or max can be None",
+        )
+        if min is None:
+            self._ivy_array = ivy.minimum(self._ivy_array, max)
+        elif max is None:
+            self._ivy_array = ivy.maximum(self._ivy_array, min)
+        else:
+            self._ivy_array = ivy.clip(self._ivy_array, min, max)
+        return self
+
     @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
     def tanh(self, name=None):
         return paddle_frontend.Tensor(ivy.tanh(self._ivy_array))
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
index 574010f97447..de7e23b9b8df 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
@@ -64,6 +64,31 @@ def _get_clip_inputs(draw):
     return x_dtype, x, min, max


+# clip_
+@st.composite
+def _get_clip_inputs_(draw):
+    shape = draw(
+        helpers.get_shape(
+            min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10
+        )
+    )
+    x_dtype, x = draw(
+        helpers.dtype_and_values(
+            available_dtypes=helpers.get_dtypes("valid"),
+            shape=shape,
+            min_value=0,
+            max_value=50,
+        )
+    )
+    min = draw(
+        helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=0, max_value=25)
+    )
+    max = draw(
+        helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=26, max_value=50)
+    )
+    return x_dtype, x, min, max
+
+
 # cond
 @st.composite
 def _get_dtype_and_matrix_non_singular(draw, dtypes):
@@ -1098,6 +1123,46 @@ def test_paddle_tensor_clip(
     )


+# clip_
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="paddle.to_tensor",
+    method_name="clip_",
+    input_and_ranges=_get_clip_inputs_(),
+)
+def test_paddle_tensor_clip_(
+    input_and_ranges,
+    frontend,
+    frontend_method_data,
+    backend_fw,
+    init_flags,
+    method_flags,
+    on_device,
+):
+    input_dtype, x, min_val, max_val = input_and_ranges
+    if min_val > max_val:
+        max_value = min_val
+        min_value = max_val
+    else:
+        max_value = max_val
+        min_value = min_val
+
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        init_all_as_kwargs_np={
+            "data": x[0],
+        },
+        method_input_dtypes=input_dtype,
+        method_all_as_kwargs_np={"min": min_value, "max": max_value},
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        frontend=frontend,
+        backend_to_test=backend_fw,
+        on_device=on_device,
+    )
+
+
 @handle_frontend_method(
     class_tree=CLASS_TREE,
     init_tree="paddle.to_tensor",
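
For reference, a minimal usage sketch of the new in-place method (not part of the patch). It assumes an Ivy install where the paddle frontend is importable and uses the numpy backend purely for illustration; the expected values in the comments follow from the clip logic in the diff above.

    import ivy
    import ivy.functional.frontends.paddle as paddle_frontend

    ivy.set_backend("numpy")

    x = paddle_frontend.to_tensor([0.0, 5.0, 10.0])
    y = x.clip_(min=2.0, max=8.0)  # clips self._ivy_array in place
    assert y is x                  # the in-place variant returns self
    # x now holds [2., 5., 8.]; passing only one of min/max falls through to the
    # ivy.maximum / ivy.minimum branches instead of ivy.clip, as in the patch.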