diff --git a/paddle/phi/kernels/cpu/logical_kernel.cc b/paddle/phi/kernels/cpu/logical_kernel.cc
index 33e3ea6e05f7e..06dff8428533f 100644
--- a/paddle/phi/kernels/cpu/logical_kernel.cc
+++ b/paddle/phi/kernels/cpu/logical_kernel.cc
@@ -77,6 +77,8 @@ void LogicalNotKernel(const Context& dev_ctx,
                      int64_t,                     \
                      int,                         \
                      int8_t,                      \
+                     phi::dtype::complex<float>,  \
+                     phi::dtype::complex<double>, \
                      int16_t) {}
 
 REGISTER_LOGICAL_CPU_KERNEL(logical_and, And)
diff --git a/paddle/phi/kernels/kps/logical_kernel.cu b/paddle/phi/kernels/kps/logical_kernel.cu
index d2a6346fd3f04..f7c390e65d0ff 100644
--- a/paddle/phi/kernels/kps/logical_kernel.cu
+++ b/paddle/phi/kernels/kps/logical_kernel.cu
@@ -97,6 +97,8 @@ PD_REGISTER_KERNEL(logical_xor, KPS, ALL_LAYOUT, phi::LogicalXorKernel, int) {
                      int64_t,                     \
                      int,                         \
                      int8_t,                      \
+                     phi::dtype::complex<float>,  \
+                     phi::dtype::complex<double>, \
                      int16_t) {}
 
 REGISTER_LOGICAL_CUDA_KERNEL(logical_and, And)
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index c9009b37ab71a..6632738695d7e 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -53,6 +53,8 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
             "float32",
             "float64",
             "uint16",
+            "complex64",
+            "complex128",
         ],
         op_name,
     )
@@ -70,6 +72,8 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
                 "float32",
                 "float64",
                 "uint16",
+                "complex64",
+                "complex128",
             ],
             op_name,
         )
@@ -114,8 +118,8 @@ def logical_and(x, y, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
     Args:
-        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
-        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
+        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
+        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
         out(Tensor, optional): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -173,8 +177,8 @@ def logical_or(x, y, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
     Args:
-        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
-        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float16, float32, float64.
+        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
+        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
         out(Tensor): The ``Variable`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -234,8 +238,8 @@ def logical_xor(x, y, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
     Args:
-        x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64.
-        y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float16, float32, float64.
+        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
+        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float16, float32, float64, complex64, complex128.
         out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -296,7 +300,7 @@ def logical_not(x, out=None, name=None):
     .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor
 
     Args:
-        x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, in32, in64, float16, float32, or float64.
+        x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, int32, int64, float16, float32, float64, complex64, or complex128.
         out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor` will be created to save the output.
         name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.
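The Python-side change above only extends the dtype whitelist and the docstrings; the truth-value semantics come from the kernels, where a complex element is treated as true whenever it differs from 0+0j. A minimal usage sketch of the extended API, assuming a Paddle build that includes these kernels (the comments show the expected boolean values, not the exact Tensor repr):

```python
import paddle

# A complex element is "true" iff it is not 0+0j, so a purely
# imaginary value such as 3j still counts as true.
x = paddle.to_tensor([0 + 0j, 0 + 3j, 2 + 0j, 1 + 1j], dtype='complex64')
y = paddle.to_tensor([1 + 1j, 0 + 0j, 0 + 0j, 2 - 1j], dtype='complex64')

print(paddle.logical_and(x, y))  # expected: [False, False, False, True]
print(paddle.logical_or(x, y))   # expected: [True, True, True, True]
print(paddle.logical_xor(x, y))  # expected: [True, True, True, False]
print(paddle.logical_not(x))     # expected: [True, False, False, False]
```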
diff --git a/test/legacy_test/test_logical_op.py b/test/legacy_test/test_logical_op.py
index 80a42d4145181..10bf33e40a5e0 100755
--- a/test/legacy_test/test_logical_op.py
+++ b/test/legacy_test/test_logical_op.py
@@ -31,6 +31,8 @@
     np.float16,
     np.float32,
     np.float64,
+    np.complex64,
+    np.complex128,
 ]
 
 TEST_META_OP_DATA = [
@@ -124,6 +126,10 @@ def np_data_generator(np_shape, dtype, *args, **kwargs):
     elif dtype == np.uint16:
         x = np.random.uniform(0.0, 1.0, np_shape).astype(np.float32)
         return convert_float_to_uint16(x)
+    elif dtype == np.complex64 or dtype == np.complex128:
+        return np.random.normal(0, 1, np_shape).astype(dtype) + (
+            1.0j * np.random.normal(0, 1, np_shape)
+        ).astype(dtype)
     else:
         return np.random.normal(0, 1, np_shape).astype(dtype)
 
@@ -169,6 +175,41 @@ def test(unit_test, use_gpu=False, test_error=False):
             (dygraph_result.numpy() == np_result).all()
         )
         unit_test.assertTrue((eager_result.numpy() == np_result).all())
+        # add some corner cases for the complex data types
+        for complex_data_type in [np.complex64, np.complex128]:
+            for x_data in (0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j):
+                for y_data in (0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j):
+                    meta_data['x_np'] = (
+                        x_data * np.ones(shape_data['x_shape'])
+                    ).astype(complex_data_type)
+                    meta_data['y_np'] = (
+                        y_data * np.ones(shape_data['y_shape'])
+                    ).astype(complex_data_type)
+                    if meta_data['binary_op'] and test_error:
+                        # catch C++ Exception
+                        unit_test.assertRaises(
+                            BaseException, run_static, **meta_data
+                        )
+                        unit_test.assertRaises(
+                            BaseException, run_dygraph, **meta_data
+                        )
+                        continue
+                    static_result = run_static(**meta_data)
+                    dygraph_result = run_dygraph(**meta_data)
+                    eager_result = run_eager(**meta_data)
+                    if meta_data['binary_op']:
+                        np_result = np_op(
+                            meta_data['x_np'], meta_data['y_np']
+                        )
+                    else:
+                        np_result = np_op(meta_data['x_np'])
+                    unit_test.assertTrue((static_result == np_result).all())
+                    unit_test.assertTrue(
+                        (dygraph_result.numpy() == np_result).all()
+                    )
+                    unit_test.assertTrue(
+                        (eager_result.numpy() == np_result).all()
+                    )
 
 
 def test_type_error(unit_test, use_gpu, type_str_map):
@@ -180,7 +221,9 @@ def check_type(op_str, x, y, binary_op):
         y = paddle.to_tensor(y)
         error_type = BaseException
         if binary_op:
-            if type_str_map['x'] != type_str_map['y']:
+            if type_str_map['x'] != type_str_map['y'] and type_str_map[
+                'x'
+            ] not in [np.complex64, np.complex128]:
                 unit_test.assertRaises(error_type, op, x=x, y=y)
             if not in_dynamic_mode():
                 error_type = TypeError
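The corner-case loop added above enumerates every pairing of {0+0j, 0+1j, 1+0j, 1+1j} and checks Paddle's static, dygraph, and eager results against NumPy as the oracle. A condensed, standalone sketch of just that oracle logic (plain NumPy, not the test harness itself):

```python
import numpy as np

# NumPy's logical ops define a complex value as truthy exactly when it
# is not 0+0j; the test relies on this as its reference behavior.
corner_cases = [0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j]
for a in corner_cases:
    for b in corner_cases:
        x = np.full((2, 2), a, dtype=np.complex64)
        y = np.full((2, 2), b, dtype=np.complex64)
        assert np.logical_and(x, y).all() == (bool(a) and bool(b))
        assert np.logical_or(x, y).all() == (bool(a) or bool(b))
        assert np.logical_xor(x, y).all() == (bool(a) != bool(b))
        assert np.logical_not(x).all() == (not bool(a))
```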