diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py
index a70042444e4bd..2f63db126f868 100644
--- a/ivy/functional/frontends/torch/tensor.py
+++ b/ivy/functional/frontends/torch/tensor.py
@@ -765,6 +765,22 @@ def long(self, memory_format=None):
     def max(self, dim=None, keepdim=False):
         return torch_frontend.max(self, dim=dim, keepdim=keepdim)
 
+    @with_unsupported_dtypes(
+        {
+            "2.0.1 and below": (
+                "complex",
+                "bfloat16",
+                "bool",
+                "uint16",
+                "uint32",
+                "uint64",
+            )
+        },
+        "torch",
+    )
+    def maximum(self, other, *, out=None):
+        return torch_frontend.maximum(self, other=other, out=out)
+
     @property
     def is_quantized(self):
         return "q" in ivy.dtype(self.ivy_array)
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
index 2034c489572a2..183d525f37084 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
@@ -8662,6 +8662,44 @@ def test_torch_tensor_max(
     )
 
 
+# maximum
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="torch.tensor",
+    method_name="maximum",
+    dtype_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("valid"),
+        num_arrays=2,
+    ),
+)
+def test_torch_tensor_maximum(
+    dtype_and_x,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+    on_device,
+    backend_fw,
+):
+    input_dtype, x = dtype_and_x
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        backend_to_test=backend_fw,
+        init_all_as_kwargs_np={
+            "data": x[0],
+        },
+        method_input_dtypes=input_dtype,
+        method_all_as_kwargs_np={
+            "other": x[1],
+        },
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        frontend=frontend,
+        on_device=on_device,
+    )
+
+
 # mean
 @handle_frontend_method(
     class_tree=CLASS_TREE,