diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py
index b5fda8cf6cf1f..d6d065bf9f809 100644
--- a/ivy/functional/frontends/torch/tensor.py
+++ b/ivy/functional/frontends/torch/tensor.py
@@ -1021,6 +1021,9 @@ def atan2_(self, other):
 
     def fmin(self, other):
         return torch_frontend.fmin(self, other)
+
+    def fmax(self, other):
+        return torch_frontend.fmax(self, other)
 
     @with_unsupported_dtypes({"2.0.1 and below": ("float16", "complex")}, "torch")
     def trunc(self):
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
index 3ff4e9866e302..67975c6da83b6 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
@@ -7828,6 +7828,44 @@ def test_torch_fmin(
         on_device=on_device,
     )
 
+
+# fmax
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="torch.tensor",
+    method_name="fmax",
+    dtype_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("float"),
+        num_arrays=2,
+    ),
+)
+def test_torch_fmax(
+    dtype_and_x,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+    on_device,
+    backend_fw,
+):
+    input_dtype, x = dtype_and_x
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        backend_to_test=backend_fw,
+        init_all_as_kwargs_np={
+            "data": x[0],
+        },
+        method_input_dtypes=input_dtype,
+        method_all_as_kwargs_np={
+            "other": x[1],
+        },
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        frontend=frontend,
+        on_device=on_device,
+    )
+
 
 # count_nonzero
 @handle_frontend_method(