From 54fb6910bb9469cc16898fc361ae8c8d933ffddf Mon Sep 17 00:00:00 2001
From: Junjie Zhang <1356732652@qq.com>
Date: Sat, 8 Feb 2025 15:01:38 +0800
Subject: [PATCH 1/2] Update test_converter_norm.py

---
 test/tensorrt/test_converter_norm.py | 55 +++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 14 deletions(-)

diff --git a/test/tensorrt/test_converter_norm.py b/test/tensorrt/test_converter_norm.py
index e302179f34ca54..bf31799d66682d 100644
--- a/test/tensorrt/test_converter_norm.py
+++ b/test/tensorrt/test_converter_norm.py
@@ -126,6 +126,35 @@ def fused_bias_dropout_residual_layer_norm(
     )
 
 
+def fused_bias_dropout_residual_layer_norm(
+    x,
+    residual,
+    bias_shape,
+    ln_scale_shape,
+    ln_bias_shape,
+    dropout_rate,
+    ln_epsilon,
+):
+    bias = paddle.create_parameter(
+        shape=bias_shape, dtype='float32', name="bias"
+    )
+    ln_scale = paddle.create_parameter(
+        shape=ln_scale_shape, dtype='float32', name="ln_scale"
+    )
+    ln_bias = paddle.create_parameter(
+        shape=ln_bias_shape, dtype='float32', name="ln_bias"
+    )
+    return paddle.incubate.nn.functional.fused_bias_dropout_residual_layer_norm(
+        x,
+        residual,
+        bias,
+        ln_scale,
+        ln_bias,
+        dropout_rate=dropout_rate,
+        ln_epsilon=ln_epsilon,
+    )
+
+
 class TestFusedBiasDropoutResidualLayerNormTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = fused_bias_dropout_residual_layer_norm
@@ -138,10 +167,10 @@ def setUp(self):
             "dropout_rate": 0.0,
             "ln_epsilon": 1e-5,
         }
-        self.program_config = {"feed_list": []}
-        self.min_shape = {}
-        self.opt_shape = {}
-        self.max_shape = {}
+        self.program_config = {"feed_list": ["x", "residual"]}
+        self.min_shape = {"x": [2, 4, 128]}
+        self.opt_shape = {"x": [4, 4, 128]}
+        self.max_shape = {"x": [8, 4, 128]}
 
     def test_fp16_trt_result(self):
         self.check_trt_result(rtol=1e-2, atol=1e-2, precision_mode="fp16")
@@ -159,10 +188,10 @@ def setUp(self):
             "dropout_rate": 0.0,
             "ln_epsilon": 1e-5,
         }
-        self.program_config = {"feed_list": []}
-        self.min_shape = {}
-        self.opt_shape = {}
-        self.max_shape = {}
+        self.program_config = {"feed_list": ["x", "residual"]}
+        self.min_shape = {"x": [2, 4, 128]}
+        self.opt_shape = {"x": [4, 4, 128]}
+        self.max_shape = {"x": [8, 4, 128]}
 
     def test_fp32_trt_result(self):
         self.check_trt_result()
@@ -170,8 +199,6 @@ def setUp(self):
 
 class TestFusedBiasDropoutResidualLayerNormErrorTRTPattern(TensorRTBaseTest):
     def setUp(self):
-        paddle.seed(42)
-        np.random.seed(42)
         self.python_api = fused_bias_dropout_residual_layer_norm
         self.api_args = {
             "x": np.random.rand(2, 4, 128).astype("float32"),
@@ -182,10 +209,10 @@ def setUp(self):
             "dropout_rate": 1.0,
             "ln_epsilon": 1e-5,
         }
-        self.program_config = {"feed_list": []}
-        self.min_shape = {}
-        self.opt_shape = {}
-        self.max_shape = {}
+        self.program_config = {"feed_list": ["x", "residual"]}
+        self.min_shape = {"x": [2, 4, 128]}
+        self.opt_shape = {"x": [4, 4, 128]}
+        self.max_shape = {"x": [8, 4, 128]}
 
     def test_trt_result(self):
         self.check_marker(expected_result=False)

From 4f702ac6cdfc5ca501782152d5e4a093ce044e78 Mon Sep 17 00:00:00 2001
From: Junjie Zhang <1356732652@qq.com>
Date: Sat, 8 Feb 2025 15:05:17 +0800
Subject: [PATCH 2/2] Update test_converter_norm.py

---
 test/tensorrt/test_converter_norm.py | 31 ----------------------------
 1 file changed, 31 deletions(-)

diff --git a/test/tensorrt/test_converter_norm.py b/test/tensorrt/test_converter_norm.py
index bf31799d66682d..304b705ad08758 100644
--- a/test/tensorrt/test_converter_norm.py
+++ b/test/tensorrt/test_converter_norm.py
@@ -95,37 +95,6 @@ def test_trt_result(self):
         self.check_marker(expected_result=False)
 
 
-def fused_bias_dropout_residual_layer_norm(
-    x,
-    residual,
-    bias_shape,
-    ln_scale_shape,
-    ln_bias_shape,
-    dropout_rate,
-    ln_epsilon,
-):
-    x = paddle.to_tensor(x)
-    residual = paddle.to_tensor(residual)
-    bias = paddle.create_parameter(
-        shape=bias_shape, dtype='float32', name="bias"
-    )
-    ln_scale = paddle.create_parameter(
-        shape=ln_scale_shape, dtype='float32', name="ln_scale"
-    )
-    ln_bias = paddle.create_parameter(
-        shape=ln_bias_shape, dtype='float32', name="ln_bias"
-    )
-    return paddle.incubate.nn.functional.fused_bias_dropout_residual_layer_norm(
-        x,
-        residual,
-        bias,
-        ln_scale,
-        ln_bias,
-        dropout_rate=dropout_rate,
-        ln_epsilon=ln_epsilon,
-    )
-
-
 def fused_bias_dropout_residual_layer_norm(
     x,
     residual,