diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp
index f30fa2bb1416a3..c369b4b6eafa23 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp
@@ -37,6 +37,12 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTest,
                                             ::testing::ValuesIn(configsInferRequestRunTests)),
                          InferRequestRunTests::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTest,
+                         InferRunTestsOnNewerDrivers,
+                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_NPU),
+                                            ::testing::ValuesIn(configsInferRequestRunTests)),
+                         InferRequestRunTests::getTestCaseName);
+
 const std::vector<ov::AnyMap> batchingConfigs = {
     {ov::log::level(ov::log::Level::WARNING), ov::intel_npu::batch_mode(ov::intel_npu::BatchMode::PLUGIN)},
     {ov::log::level(ov::log::Level::WARNING), ov::intel_npu::batch_mode(ov::intel_npu::BatchMode::COMPILER)},
diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp
index 54590a7abe513f..5bf1c6522bb32e 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp
@@ -170,36 +170,6 @@ TEST_P(InferRequestRunTests, MultipleExecutorStreamsTestsSyncInfers) {
     }
 }
 
-TEST_P(InferRequestRunTests, MultipleCompiledModelsTestsSyncInfers) {
-    // Skip test according to plugin specific disabledTestPatterns() (if any)
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    // Load CNNNetwork to target plugins
-    const int no_of_iterations = 256;
-    std::array<ov::CompiledModel, no_of_iterations> compiled_models;
-
-    for (int i = 0; i < no_of_iterations; ++i) {
-        OV_ASSERT_NO_THROW(compiled_models[i] = core->compile_model(ov_model, target_device, configuration));
-    }
-
-    // Create InferRequests
-    std::array<ov::InferRequest, no_of_iterations> infer_reqs;
-    std::array<std::thread, no_of_iterations> infer_reqs_threads;
-    for (int i = 0; i < no_of_iterations; ++i) {
-        OV_ASSERT_NO_THROW(infer_reqs[i] = compiled_models[i].create_infer_request());
-    }
-
-    for (int i = 0; i < no_of_iterations; ++i) {
-        infer_reqs_threads[i] = std::thread([&infer_reqs, i]() -> void {
-            OV_ASSERT_NO_THROW(infer_reqs[i].infer());
-            infer_reqs[i] = {};
-        });
-    }
-
-    for (int i = 0; i < no_of_iterations; ++i) {
-        infer_reqs_threads[i].join();
-    }
-}
-
 TEST_P(InferRequestRunTests, MultipleExecutorStreamsTestsAsyncInfers) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
@@ -1091,6 +1061,38 @@ TEST_P(SetShapeInferRunTests, checkResultsAfterStateTensorsReallocation) {
     }
 }
 
+using InferRunTestsOnNewerDrivers = InferRequestRunTests;
+
+TEST_P(InferRunTestsOnNewerDrivers, MultipleCompiledModelsTestsSyncInfers) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Load CNNNetwork to target plugins
+    const int no_of_iterations = 256;
+    std::array<ov::CompiledModel, no_of_iterations> compiled_models;
+
+    for (int i = 0; i < no_of_iterations; ++i) {
+        OV_ASSERT_NO_THROW(compiled_models[i] = core->compile_model(ov_model, target_device, configuration));
+    }
+
+    // Create InferRequests
+    std::array<ov::InferRequest, no_of_iterations> infer_reqs;
+    std::array<std::thread, no_of_iterations> infer_reqs_threads;
+    for (int i = 0; i < no_of_iterations; ++i) {
+        OV_ASSERT_NO_THROW(infer_reqs[i] = compiled_models[i].create_infer_request());
+    }
+
+    for (int i = 0; i < no_of_iterations; ++i) {
+        infer_reqs_threads[i] = std::thread([&infer_reqs, i]() -> void {
+            OV_ASSERT_NO_THROW(infer_reqs[i].infer());
+            infer_reqs[i] = {};
+        });
+    }
+
+    for (int i = 0; i < no_of_iterations; ++i) {
+        infer_reqs_threads[i].join();
+    }
+}
+
 }  // namespace behavior
 }  // namespace test
 }  // namespace ov