Support pytest-rerunfailures plugin #24473

Open · wants to merge 7 commits into main
@@ -0,0 +1,21 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import pytest
import os

@pytest.mark.flaky(reruns=2)
def test_flaky():  # test_marker--test_flaky
    # COUNT is not set on the first run, but is set to "2" for the second run.
    count = os.environ.get("COUNT")
    os.environ["COUNT"] = "2"
    # This fails on the first run but passes on the second (1 passed, 1 rerun).
    assert count == "2"

def test_flaky_no_marker():
    # This test is flaky and is rerun via the --reruns command-line argument.
    # COUNT is not set on the first run, but is set to "2" for the second run.
    count = os.environ.get("COUNT")
    os.environ["COUNT"] = "2"
    # This fails on the first run but passes on the second (1 passed, 1 rerun).
    assert count == "2"
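
As a point of reference, here is a minimal sketch of how these two tests are meant to be exercised (assuming the pytest-rerunfailures plugin is installed; the COUNT environment variable is only the mechanism that makes the first attempt fail and the rerun pass):

    python -m pytest test_rerunfailures_plugin.py::test_flaky
    python -m pytest --reruns 2 test_rerunfailures_plugin.py::test_flaky_no_marker

The first invocation is rerun by the @pytest.mark.flaky(reruns=2) marker alone; the second relies entirely on the --reruns command-line argument, giving the "1 passed, 1 rerun" outcome described in the comments above.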
30 changes: 30 additions & 0 deletions python_files/tests/pytestadapter/expected_execution_test_output.py
@@ -734,3 +734,33 @@
"subtest": None,
},
}

test_rerunfailures_plugin_path = TEST_DATA_PATH / "test_rerunfailures_plugin.py"
rerunfailures_plugin_expected_execution_output = {
get_absolute_test_id(
"test_rerunfailures_plugin.py::test_flaky", test_rerunfailures_plugin_path
): {
"test": get_absolute_test_id(
"test_rerunfailures_plugin.py::test_flaky", test_rerunfailures_plugin_path
),
"outcome": "success",
"message": None,
"traceback": None,
"subtest": None,
}
}

test_rerunfailures_plugin_path = TEST_DATA_PATH / "test_rerunfailures_plugin.py"
rerunfailures_with_arg_expected_execution_output = {
get_absolute_test_id(
"test_rerunfailures_plugin.py::test_flaky_no_marker", test_rerunfailures_plugin_path
): {
"test": get_absolute_test_id(
"test_rerunfailures_plugin.py::test_flaky_no_marker", test_rerunfailures_plugin_path
),
"outcome": "success",
"message": None,
"traceback": None,
"subtest": None,
}
}
1 change: 1 addition & 0 deletions python_files/tests/pytestadapter/helpers.py
@@ -331,6 +331,7 @@ def runner_with_cwd_env(
        os.mkfifo(pipe_name)
    #################

    print("beginning run request")
    completed = threading.Event()

    result = []  # result is a string array to store the data during threading
4 changes: 4 additions & 0 deletions python_files/tests/pytestadapter/test_discovery.py
@@ -251,6 +251,7 @@ def test_pytest_root_dir():

    Discovery should succeed and testids should be relative to workspace root.
    """
    print("running test_pytest_root_dir")
    rd = f"--rootdir={helpers.TEST_DATA_PATH / 'root' / 'tests'}"
    actual = helpers.runner_with_cwd(
        [
@@ -279,6 +280,7 @@ def test_pytest_config_file():

    Discovery should succeed and testids should be relative to workspace root.
    """
    print("running test_pytest_config_file")
    actual = helpers.runner_with_cwd(
        [
            "--collect-only",
@@ -301,13 +303,15 @@ def test_pytest_config_file():
), f"Tests tree does not match expected value. \n Expected: {json.dumps(expected_discovery_test_output.root_with_config_expected_output, indent=4)}. \n Actual: {json.dumps(actual_item.get('tests'), indent=4)}"


@pytest.mark.timeout(60)
def test_config_sub_folder():
"""Here the session node will be a subfolder of the workspace root and the test are in another subfolder.

This tests checks to see if test node path are under the session node and if so the
session node is correctly updated to the common path.
"""
folder_path = helpers.TEST_DATA_PATH / "config_sub_folder"
print("running test_config_sub_folder")
actual = helpers.runner_with_cwd(
[
"--collect-only",
24 changes: 24 additions & 0 deletions python_files/tests/pytestadapter/test_execution.py
@@ -65,6 +65,23 @@ def test_rootdir_specified():
    assert actual_result_dict == expected_const


def test_rerunfailure_with_arg():
    """Test pytest execution with the --reruns command-line argument on a flaky test that has no marker."""
    args = ["--reruns=2", "test_rerunfailures_plugin.py::test_flaky_no_marker"]
    actual = runner(args)
    expected_const = expected_execution_test_output.rerunfailures_with_arg_expected_execution_output
    assert actual
    actual_list: List[Dict[str, Dict[str, Any]]] = actual
    assert len(actual_list) == len(expected_const)
    actual_result_dict = {}
    if actual_list is not None:
        for actual_item in actual_list:
            assert all(item in actual_item for item in ("status", "cwd", "result"))
            assert actual_item.get("status") == "success"
            actual_result_dict.update(actual_item["result"])
    assert actual_result_dict == expected_const


@pytest.mark.parametrize(
("test_ids", "expected_const"),
[
@@ -194,6 +211,13 @@ def test_rootdir_specified():
            expected_execution_test_output.nested_describe_expected_execution_output,
            id="nested_describe_plugin",
        ),
        pytest.param(
            [
                "test_rerunfailures_plugin.py::test_flaky",
            ],
            expected_execution_test_output.rerunfailures_plugin_expected_execution_output,
            id="test_rerunfailures_plugin",
        ),
    ],
)
def test_pytest_execution(test_ids, expected_const):
63 changes: 61 additions & 2 deletions python_files/vscode_pytest/__init__.py
@@ -68,6 +68,7 @@ def __init__(self, message):
collected_tests_so_far = []
TEST_RUN_PIPE = os.getenv("TEST_RUN_PIPE")
SYMLINK_PATH = None
FLAKY_MAX_RUNS = None


def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001
@@ -92,6 +93,11 @@ def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001
    global IS_DISCOVERY
    IS_DISCOVERY = True

    # set the reruns value, -1 if not set
    global FLAKY_MAX_RUNS
    FLAKY_MAX_RUNS = get_reruns_value(args)
    print("Plugin info[vscode-pytest]: global FLAKY_MAX_RUNS set to: ", FLAKY_MAX_RUNS)

    # check if --rootdir is in the args
    for arg in args:
        if "--rootdir=" in arg:
@@ -120,6 +126,29 @@ def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001
                SYMLINK_PATH = rootdir


def get_reruns_value(args):
    """Extract the value of the --reruns argument from a list of command-line arguments.

    Args:
        args (list of str): A list of command-line arguments.

    Returns:
        int: The integer value of the --reruns argument if found, otherwise -1.
    """
    for arg in args:
        # Match --reruns or --reruns=<value>, but not other flags such as --reruns-delay.
        if arg == "--reruns" or arg.startswith("--reruns="):
            if "=" in arg:
                # Extract the value from --reruns=<value>.
                return int(arg.split("=")[1])
            # Otherwise the value is the next argument.
            index = args.index(arg)
            if index + 1 < len(args):
                return int(args[index + 1])
    return -1
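
# Illustrative examples (not part of this change) of how the parsing above behaves
# when given pytest's raw argument list:
#   get_reruns_value(["--reruns=3", "tests/"])     -> 3
#   get_reruns_value(["--reruns", "3", "tests/"])  -> 3
#   get_reruns_value(["tests/"])                   -> -1  (flag not passed)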


def pytest_internalerror(excrepr, excinfo): # noqa: ARG001
"""A pytest hook that is called when an internal error occurs.

@@ -150,11 +179,38 @@ def pytest_exception_interact(node, call, report):
        else:
            ERRORS.append(report.longreprtext + "\n Check Python Test Logs for more details.")
    else:
        node_id = get_absolute_test_id(node.nodeid, get_node_path(node))
        # Check if pytest-rerunfailures is enabled:
        # - check which run this is (the execution count)
        # - check the number of reruns allowed
        # - if the max number of reruns is reached, send the error message; otherwise do not.

        try:
            exec_count = node.execution_count
            # The global is set if the --reruns arg is present during pytest_load_initial_conftests;
            # if the arg is not present, it is -1.
            flaky_max_runs = FLAKY_MAX_RUNS

            # Check for the rerunfailures "flaky" marker.
            for m in node.own_markers:
                if m.name == "flaky":
                    flaky_max_runs = m.kwargs.get("reruns", 0)
                    break

            # flaky_max_runs != -1 means the test is flaky.
            if flaky_max_runs != -1 and exec_count <= flaky_max_runs:
                print("flaky test rerun: ", exec_count)
                return
            elif flaky_max_runs != -1 and exec_count > flaky_max_runs:
                print("Plugin info[vscode-pytest]: max reruns reached.")
        except AttributeError:
            pass

        # If during execution, send this data that the given node failed.
        report_value = "error"
        if call.excinfo.typename == "AssertionError":
            report_value = "failure"
        node_id = get_absolute_test_id(node.nodeid, get_node_path(node))
        # Only add the test to collected_tests_so_far if it is not a flaky test that will re-run.
        if node_id not in collected_tests_so_far:
            collected_tests_so_far.append(node_id)
        item_result = create_test_outcome(
@@ -280,7 +336,8 @@ def pytest_report_teststatus(report, config): # noqa: ARG001
            node_path = cwd
        # Calculate the absolute test id and use this as the ID moving forward.
        absolute_node_id = get_absolute_test_id(report.nodeid, node_path)
        if absolute_node_id not in collected_tests_so_far:
        # If the test is not a rerun, add it to the collected_tests_so_far list.
        if report.outcome != "rerun" and absolute_node_id not in collected_tests_so_far:
            collected_tests_so_far.append(absolute_node_id)
            item_result = create_test_outcome(
                absolute_node_id,
@@ -654,6 +711,8 @@ def build_nested_folders(
    counter = 0
    max_iter = 100
    while iterator_path != session_node_path:
        print("iterator_path: ", iterator_path)
        print("session_node_path: ", session_node_path)
        curr_folder_name = iterator_path.name
        try:
            curr_folder_node: TestNode = created_files_folders_dict[os.fspath(iterator_path)]