diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py index c116117f9773..85283479ecdb 100644 --- a/ivy/functional/frontends/torch/tensor.py +++ b/ivy/functional/frontends/torch/tensor.py @@ -1878,6 +1878,14 @@ def index_fill(self, dim, index, value): def cummax(self, dim): return torch_frontend.cummax(self, dim) + @with_unsupported_dtypes( + {"2.0.1 and below": ("bfloat16",)}, + "torch", + ) + def xlogy_(self, other, *, out=None): + self.ivy_array = torch_frontend.xlogy(self, other, out=out).ivy_array + return self + def ne(self, other): return torch_frontend.ne(self, other) diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py index e5b3a553322d..06661fca0c00 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py @@ -12540,6 +12540,48 @@ def test_torch_tensor_where( ) +# xlogy_ +@handle_frontend_method( + class_tree=CLASS_TREE, + init_tree="torch.tensor", + method_name="xlogy_", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + min_num_dims=1, + min_value=-100, + max_value=100, + shared_dtype=True, + ), +) +def test_torch_tensor_xlogy_( + dtype_and_x, + frontend, + backend_fw, + frontend_method_data, + init_flags, + method_flags, + on_device, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_method( + init_input_dtypes=input_dtype, + init_all_as_kwargs_np={ + "data": x[0], + }, + method_input_dtypes=input_dtype, + method_all_as_kwargs_np={ + "other": x[1], + }, + frontend_method_data=frontend_method_data, + init_flags=init_flags, + method_flags=method_flags, + frontend=frontend, + backend_to_test=backend_fw, + on_device=on_device, + ) + + # zero_ tests @handle_frontend_method( class_tree=CLASS_TREE,