[PIR] del test_with_pir_api #68384

Merged
16 changes: 0 additions & 16 deletions python/paddle/pir_utils.py
@@ -13,7 +13,6 @@
 # limitations under the License.


-import os
 from functools import wraps

 import paddle
@@ -183,21 +182,6 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         _switch_to_pir_()


-def test_with_pir_api(func):
-    @wraps(func)
-    def impl(*args, **kwargs):
-        skip_old_ir = os.environ.get("FLAGS_CI_skip_old_ir", "False")
-        skip_pir = os.environ.get("FLAGS_CI_skip_pir", "False")
-        if skip_old_ir == "False" or not skip_old_ir:
-            with OldIrGuard():
-                func(*args, **kwargs)
-        if skip_pir == "False" or not skip_pir:
-            with IrGuard():
-                func(*args, **kwargs)
-
-    return impl
-
-
 def test_with_old_ir_only(func):
     @wraps(func)
     def impl(*args, **kwargs):
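For reference, the deleted decorator ran a test body twice, once under the legacy IR and once under PIR, gated by the `FLAGS_CI_skip_old_ir` / `FLAGS_CI_skip_pir` environment variables. Below is a minimal sketch of expressing that double run explicitly with the `OldIrGuard` / `IrGuard` context managers that remain in `paddle.pir_utils`; the test class and helper names are hypothetical, not part of this PR.

```python
import unittest

from paddle.framework import in_pir_mode
from paddle.pir_utils import IrGuard, OldIrGuard


class ExampleTest(unittest.TestCase):  # hypothetical test class
    def _run_check(self):
        # Placeholder body; a real test would build a program under the
        # active IR and compare outputs between the two runs.
        print("running under PIR:", in_pir_mode())

    def test_both_irs(self):
        with OldIrGuard():  # legacy program IR
            self._run_check()
        with IrGuard():  # PIR
            self._run_check()
```

(After this PR, the affected tests run once, under whichever IR mode the framework or CI configuration enables by default.)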
3 changes: 0 additions & 3 deletions test/contrib/test_image_classification_fp16.py
@@ -30,7 +30,6 @@
 from paddle import base
 from paddle.framework import in_pir_mode
 from paddle.nn import Layer
-from paddle.pir_utils import test_with_pir_api
 from paddle.static.amp import decorate

 paddle.enable_static()
@@ -629,12 +628,10 @@ def test_amp_lists_7(self):
             {'lstm'},
         )

-    @test_with_pir_api
     def test_vgg_cuda(self):
         with self.scope_prog_guard():
             self.main('vgg', use_cuda=True)

-    @test_with_pir_api
     def test_resnet_cuda(self):
         with self.scope_prog_guard():
             self.main('resnet', use_cuda=True)
2 changes: 0 additions & 2 deletions test/contrib/test_multi_precision_fp16_train.py
@@ -21,7 +21,6 @@
 from paddle import base
 from paddle.io import Dataset
 from paddle.nn import Layer
-from paddle.pir_utils import test_with_pir_api
 from paddle.static.amp.fp16_utils import cast_model_to_fp16

 paddle.enable_static()
@@ -254,7 +253,6 @@ def train_loop():


 class TestImageMultiPrecision(unittest.TestCase):
-    @test_with_pir_api
     def test_resnet_pure_fp16(self):
         if not base.core.is_compiled_with_cuda():
             return
2 changes: 0 additions & 2 deletions test/custom_op/test_custom_cast_op_jit.py
@@ -25,7 +25,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -102,7 +101,6 @@ class TestCustomCastOp(unittest.TestCase):
     def setUp(self):
         self.dtypes = ['float32', 'float64']

-    @test_with_pir_api
     def test_static(self):
         for dtype in self.dtypes:
             x = np.random.uniform(-1, 1, [4, 8]).astype("float32")
3 changes: 0 additions & 3 deletions test/custom_op/test_custom_concat.py
@@ -20,7 +20,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -143,7 +142,6 @@ def test_dynamic(self):
         for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs):
             self.check_output(x_grad, pd_x_grad, "x_grad")

-    @test_with_pir_api
     def test_static(self):
         for dtype in self.dtypes:
             for axis in self.axises:
@@ -176,7 +174,6 @@ def test_dynamic_with_attr(self):
         for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs):
             self.check_output(x_grad, pd_x_grad, "x_grad")

-    @test_with_pir_api
     def test_static_with_attr(self):
         for dtype in self.dtypes:
             for axis in self.axises:
2 changes: 0 additions & 2 deletions test/custom_op/test_custom_conj.py
@@ -20,7 +20,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -113,7 +112,6 @@ def test_dynamic(self):
         check_output(out, pd_out, "out")
         check_output(x_grad, pd_x_grad, "x's grad")

-    @test_with_pir_api
     def test_static(self):
         for dtype in self.dtypes:
             np_input = np.random.random(self.shape).astype(dtype)
5 changes: 0 additions & 5 deletions test/custom_op/test_custom_inplace.py
@@ -26,7 +26,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -430,7 +429,6 @@ def setUp(self):
             np.random.random((3, 2)).astype("float32"),
         ]

-    @test_with_pir_api
     def test_static_add(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -509,7 +507,6 @@ def test_dynamic_add(self):
         check_output(custom_x_grad, pd_x_grad, "x_grad")
         check_output(custom_y_grad, pd_y_grad, "y_grad")

-    @test_with_pir_api
     def test_static_add_vector(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -582,7 +579,6 @@ def test_dynamic_add_vector(self):
         check_output(custom_x_grad, pd_x_grad, "x_grad")
         check_output(custom_y_grad, pd_y_grad, "y_grad")

-    @test_with_pir_api
     def test_static_relu_net(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -658,7 +654,6 @@ def test_dynamic_relu_net(self):
         check_output(custom_x_grad, pd_x_grad, "x_grad")
         check_output(custom_y_grad, pd_y_grad, "y_grad")

-    @test_with_pir_api
     def test_static_multi_inplace(self):
         for device in self.devices:
             for dtype in self.dtypes:
2 changes: 0 additions & 2 deletions test/custom_op/test_custom_linear.py
@@ -21,7 +21,6 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -119,7 +118,6 @@ def setUp(self):
         self.np_weight = np.full([2, 4], fill_value=0.5, dtype="float32")
         self.np_bias = np.ones([4], dtype="float32")

-    @test_with_pir_api
     def test_static(self):
         for device in self.devices:
             for dtype in self.dtypes:
5 changes: 0 additions & 5 deletions test/custom_op/test_custom_optional.py
@@ -20,7 +20,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -529,7 +528,6 @@ def setUp(self):
             np.random.random((3, 2)).astype("float32"),
         ]

-    @test_with_pir_api
     def test_optional_static_add(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -592,7 +590,6 @@ def test_optional_dynamic_add(self):
         check_output(custom_out, pd_out, "out")
         check_output(custom_x_grad, pd_x_grad, "x_grad")

-    @test_with_pir_api
     def test_optional_inplace_static_add(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -664,7 +661,6 @@ def test_optional_inplace_dynamic_add(self):
         check_output(custom_x_grad, pd_x_grad, "x_grad")
         check_output(custom_y_grad, pd_y_grad, "y_grad")

-    @test_with_pir_api
     def test_optional_vector_static_add(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -727,7 +723,6 @@ def test_optional_vector_dynamic_add(self):
         check_output(custom_out, pd_out, "out")
         check_output(custom_x_grad, pd_x_grad, "x_grad")

-    @test_with_pir_api
     def test_optional_inplace_vector_static_add(self):
         for device in self.devices:
             for dtype in self.dtypes:
2 changes: 0 additions & 2 deletions test/custom_op/test_custom_relu_model.py
@@ -21,7 +21,6 @@

 import paddle
 from paddle import nn
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -222,7 +221,6 @@ def tearDown(self):
         paddle.disable_static()
         self.temp_dir.cleanup()

-    @test_with_pir_api
     def test_train_eval(self):
         for device in self.devices:
             # for train
2 changes: 0 additions & 2 deletions test/custom_op/test_custom_relu_op_jit.py
@@ -26,7 +26,6 @@
 )

 import paddle
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -71,7 +70,6 @@ def setUp(self):
         if paddle.is_compiled_with_cuda():
             self.devices.append('gpu')

-    @test_with_pir_api
     def test_static(self):
         for device in self.devices:
             for dtype in self.dtypes:
2 changes: 0 additions & 2 deletions test/custom_op/test_custom_relu_op_setup.py
@@ -22,7 +22,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension.extension_utils import run_cmd
 from paddle.vision.transforms import Compose, Normalize

@@ -231,7 +230,6 @@ def _test_dynamic(self):
         check_output(out, pd_out, "out")
         check_output(x_grad, pd_x_grad, "x_grad")

-    @test_with_pir_api
     def _test_static_save_and_load_inference_model(self):
         paddle.enable_static()
         np_data = np.random.random((1, 1, 28, 28)).astype("float32")
2 changes: 0 additions & 2 deletions test/custom_op/test_custom_tensor_operator.py
@@ -25,7 +25,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -252,7 +251,6 @@ def test_dynamic(self):
         self._test_logical_operants()
         self._test_compare_operants()

-    @test_with_pir_api
     def test_static(self):
         self.add = self.custom_module.custom_add
         self.subtract = self.custom_module.custom_subtract
3 changes: 0 additions & 3 deletions test/custom_op/test_multi_out_jit.py
@@ -20,7 +20,6 @@

 import paddle
 from paddle import static
-from paddle.pir_utils import test_with_pir_api
 from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd

@@ -158,7 +157,6 @@ def check_multi_outputs(self, outs, is_dynamic=False):
         self.assertTrue('int32' in str(one_int32.dtype))
         check_output(one_int32, np.ones([4, 8]).astype('int32'), "one_int32")

-    @test_with_pir_api
     def test_multi_out_static(self):
         paddle.enable_static()
         for device in self.devices:
@@ -178,7 +176,6 @@ def test_multi_out_dynamic(self):
         self.assertTrue(len(outs) == 3)
         self.check_multi_outputs(outs, True)

-    @test_with_pir_api
     def test_discrete_out_static(self):
         for device in self.devices:
             for dtype in self.dtypes:
2 changes: 0 additions & 2 deletions test/deprecated/legacy_test/test_attribute_var_deprecated.py
@@ -21,7 +21,6 @@
 import paddle
 import paddle.inference as paddle_infer
 from paddle.framework import in_pir_mode
-from paddle.pir_utils import test_with_pir_api

 paddle.enable_static()

@@ -79,7 +78,6 @@ def init_info(self):
         self.shapes = [[10, 10]]
         self.save_path = os.path.join(self.temp_dir.name, 'dropout')

-    @test_with_pir_api
     def test_static(self):
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
@@ -19,13 +19,12 @@
 import paddle
 from paddle import base
 from paddle.base import core
-from paddle.pir_utils import test_with_pir_api
 from paddle.static import Executor, append_backward
 from paddle.static.nn.control_flow import ConditionalBlock


 class ConditionalBlockTest(unittest.TestCase):
-    @test_with_pir_api
     def test_forward(self):
         main_program = base.Program()
         startup_program = base.Program()
2 changes: 0 additions & 2 deletions test/deprecated/legacy_test/test_device_guard_deprecated.py
@@ -17,7 +17,6 @@

 import paddle
 from paddle.base import core, in_pir_mode
-from paddle.pir_utils import test_with_pir_api

 paddle.enable_static()

@@ -93,7 +92,6 @@ def test_device_guard_with_id(self):

         execute(main_program, startup_program)

-    @test_with_pir_api
     def test_without_kernel_op(self):
         main_program = paddle.static.Program()
         startup_program = paddle.static.Program()