#!/usr/bin/env python3
"""
Autotest tester: python
"""
import json
import os
import sys
import unittest
from types import TracebackType
from typing import Dict, Iterable, Optional, TextIO, Tuple, Type

import pytest
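
# Example invocation (a sketch: the exact settings schema is defined by the
# calling autotester harness; the keys shown below are the ones this script
# actually reads, and the file name is illustrative):
#
#   ./tester '{"test_data": {"tester": "pytest",
#                            "script_files": ["test_submission.py"],
#                            "output_verbosity": "short"}}'
#
# Each test result is printed to stdout as one JSON object per line.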


class TextTestResults(unittest.TextTestResult):
    """
    Custom unittest.TextTestResult that saves each result as a
    dictionary in `self.results`.
    """

    def __init__(self, stream: TextIO, descriptions: bool, verbosity: int) -> None:
        """
        Extends TextTestResult.__init__ and adds additional attributes to keep track
        of successes and additional result information.
        """
        super().__init__(stream, descriptions, verbosity)
        self.results = []
        self.successes = []

    @staticmethod
    def _autotest_test_name(test: unittest.TestCase) -> str:
        """
        Return the name of the test. Append the docstring of the test function if it exists.
        """
        # noinspection PyProtectedMember
        doc = test._testMethodDoc
        if doc:
            return f"{test.id()} ({doc})"
        return test.id()

    def addSuccess(self, test: unittest.TestCase) -> None:
        """
        Record that a test passed.
        """
        self.results.append(
            {
                "name": self._autotest_test_name(test),
                "output": "",
                "marks_earned": 1,
                "marks_total": 1,
                "status": "pass",
            }
        )
        self.successes.append(test)

    def addFailure(
        self,
        test: unittest.TestCase,
        err: Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]],
    ) -> None:
        """
        Record that a test failed.
        """
        super().addFailure(test, err)
        self.results.append(
            {
                "name": self._autotest_test_name(test),
                "output": self.failures[-1][-1],  # formatted traceback string
                "marks_earned": 0,
                "marks_total": 1,
                "status": "fail",
            }
        )

    def addError(
        self,
        test: unittest.TestCase,
        err: Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]],
    ) -> None:
        """
        Record that a test failed with an error.
        """
        super().addError(test, err)
        self.results.append(
            {
                "name": self._autotest_test_name(test),
                "output": self.errors[-1][-1],  # formatted traceback string
                "marks_earned": 0,
                "marks_total": 1,
                "status": "error",
            }
        )
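
# A minimal sketch of how this result class could be exercised on its own
# (assumes a local TestCase subclass named SampleTest; in this script the
# class is instead wired in via `resultclass` in `_run_unittest_tests` below):
#
#   suite = unittest.defaultTestLoader.loadTestsFromTestCase(SampleTest)
#   runner = unittest.TextTestRunner(resultclass=TextTestResults, verbosity=2)
#   result = runner.run(suite)
#   # result.results -> [{"name": ..., "output": "", "marks_earned": 1,
#   #                     "marks_total": 1, "status": "pass"}, ...]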


class PytestPlugin:
    """
    Pytest plugin to collect and parse test results as well
    as any errors during the test collection process.
    """

    def __init__(self) -> None:
        """
        Initialize a pytest plugin for collecting results
        """
        self.results = {}

    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_runtest_makereport(
        self, item: pytest.Item, call: Optional[pytest.CallInfo]
    ) -> Optional[pytest.TestReport]:
        """
        Implement a pytest hook that is run when reporting the
        results of a given test item.

        Records the results of each test in the `self.results`
        attribute.

        See pytest documentation for a description of the parameter
        types and the return value.

        Note that this hook is triggered for the setup, call, and
        teardown steps of each test.
        """
        outcome = yield
        result = outcome.get_result()
        if item.nodeid not in self.results:
            doc = item.obj.__doc__ or ""
            if doc:
                doc = f" ({doc})"
            self.results[item.nodeid] = {
                "name": item.nodeid + doc,
                "output": "",
                "marks_earned": 1,
                "marks_total": 1,
                "status": "pass",
            }
        self.results[item.nodeid]["time"] = int(result.duration * 1000)
        if result.failed:
            self.results[item.nodeid]["output"] = str(result.longrepr)
            self.results[item.nodeid]["marks_earned"] = 0
            self.results[item.nodeid]["status"] = "fail"
        return result

    def pytest_collectreport(self, report: pytest.CollectReport) -> None:
        """
        Implement a pytest hook that is run after the collector has
        finished collecting all tests.

        Records a test failure in the `self.results` attribute if the
        collection step failed.

        See pytest documentation for a description of the parameter
        types and the return value.
        """
        if report.failed:
            self.results[report.nodeid] = {
                "name": report.nodeid,
                "output": str(report.longrepr),
                "marks_earned": 0,
                "marks_total": 1,
                "status": "error",
            }
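
# The plugin can also be used directly, independent of `_run_pytest_tests`
# (a sketch; "tests/" stands in for any test path):
#
#   plugin = PytestPlugin()
#   pytest.main(["tests/"], plugins=[plugin])
#   for result in plugin.results.values():
#       print(result["name"], result["status"])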


def _load_unittest_tests(test_file: str) -> unittest.TestSuite:
    """
    Discover unittest tests in test_file and return
    a `unittest.TestSuite` that contains these tests
    """
    test_loader = unittest.defaultTestLoader
    test_file_dir = os.path.dirname(test_file)
    discovered_tests = test_loader.discover(test_file_dir, test_file)
    return unittest.TestSuite(discovered_tests)


def _run_unittest_tests(test_file: str, test_data: Dict) -> Iterable[Dict]:
    """
    Run unittest tests in test_file and return the results
    of these tests
    """
    test_suite = _load_unittest_tests(test_file)
    with open(os.devnull, "w") as nullstream:
        test_runner = unittest.TextTestRunner(
            verbosity=test_data["output_verbosity"],
            stream=nullstream,
            resultclass=TextTestResults,
        )
        test_result = test_runner.run(test_suite)
        return test_result.results
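
# Note: on the unittest path, `output_verbosity` is passed straight through as
# `unittest.TextTestRunner`'s integer verbosity (0, 1, or 2); the pytest path
# below instead uses it as a `--tb` traceback style string (e.g. "short" or
# "long"), so the harness presumably supplies a different value per tester.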


def _run_pytest_tests(test_file: str, test_data: Dict) -> Iterable[Dict]:
    """
    Run pytest tests in test_file and return the results
    of these tests
    """
    with open(os.devnull, "w") as nullstream:
        try:
            sys.stdout = nullstream
            verbosity = test_data["output_verbosity"]
            plugin = PytestPlugin()
            pytest.main([test_file, f"--tb={verbosity}"], plugins=[plugin])
            return plugin.results.values()
        finally:
            sys.stdout = sys.__stdout__
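
# Design note: stdout is redirected to os.devnull while pytest runs so that
# test prints and pytest's own report cannot interleave with the JSON lines
# that `run` emits; the `finally` block restores it even if a test crashes.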


def run(settings: Dict) -> None:
    """
    Run all tests indicated by settings
    """
    test_data = settings["test_data"]
    for test_file in test_data["script_files"]:
        if test_data["tester"] == "unittest":
            results = _run_unittest_tests(test_file, test_data)
        else:
            results = _run_pytest_tests(test_file, test_data)
        for res in results:
            print(json.dumps(res), flush=True)
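
# Example of what `run` emits on stdout, one JSON object per line
# (names and values here are illustrative, not real results):
#
#   {"name": "test_submission.py::test_add (Check addition.)", "output": "",
#    "marks_earned": 1, "marks_total": 1, "status": "pass", "time": 3}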


if __name__ == "__main__":
    run(json.loads(sys.argv[1]))