Commit
fix permission issue in grading and bugs
calvinchai committed Mar 29, 2024
1 parent c04c165 commit b80b13c
Showing 5 changed files with 85 additions and 39 deletions.
14 changes: 9 additions & 5 deletions magi/common/gradescope.py
@@ -23,18 +23,22 @@ class TestCase:
     # tags: list = field(default_factory=list)
     # extra_data:str = ""
 
-    def fail_test(self, msg: str):
-        self.output += "\n" + msg
+    def fail(self, msg: str=""):
+        if msg:
+            self.output += "\n" + msg
         self.score = 0
         self.status = "failed"
 
-    def pass_test(self, msg: str):
-        self.output += "\n" + msg
+    def succ(self, msg: str=""):
+        if msg:
+            self.output += "\n" + msg
         self.score = self.max_score
         self.status = "passed"
 
     def add_output_msg(self, msg: str):
-        self.output += "\n" + msg
+        if self.output:
+            self.output += "\n"
+        self.output += msg
 
 
 @dataclass
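
For context, a minimal usage sketch of the renamed helpers (hypothetical, not part of the commit; the test name, score, and messages are invented):

    from magi.common.gradescope import TestCase

    # Hypothetical example exercising the renamed TestCase helpers.
    case = TestCase(name="example_test", max_score=10)
    case.add_output_msg("compiled cleanly")   # no leading newline now that output starts empty
    case.fail("expected 42, got 41")          # score -> 0, status -> "failed", message appended
    print(case.output)
    # compiled cleanly
    # expected 42, got 41
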
21 changes: 16 additions & 5 deletions magi/managers/test_manager.py
@@ -5,6 +5,7 @@
 from typing import Optional
 
 from magi.common.gradescope import TestCase, Result
+from magi.utils.serialization import serialize
 
 logging = logging.getLogger("TestManager")
 
@@ -19,10 +20,16 @@ def __init__(self):
         self.test_cases_by_name = {}
         self.anonymous_counter: int = 0
         self.all_failed: bool = False  # if True, the total score and Testcases will be zero
-
-
-status = Status()
-
+
+    def reset(self):
+        self.score = 0
+        self.execution_time = 0
+        self.output = ""
+        self.extra_data = {}
+        self.test_cases = []
+        self.test_cases_by_name = {}
+        self.anonymous_counter = 0
+        self.all_failed = False
 
 def reset():
     """
@@ -93,13 +100,17 @@ def output_result(result_path: Optional[str | Path] = None) -> None:
         from magi.managers import InfoManager
         result_path = InfoManager.Directories.RESULT_JSON_PATH
 
-    from magi.utils.serialization import serialize
     result = Result()
     result.output += status.output
+
+    if not status.test_cases:
+        logging.warning("No test cases were added")
+        status.test_cases.append(TestCase(name="No test cases were executed", score=0, output=""))
+
     for test in status.test_cases:
         result.tests.append(test)
 
     if status.all_failed:
         result.score = 0
 
     serialize(result, result_path)
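
A rough sketch of driving the reworked manager end to end (hypothetical call sequence; the module-level reset(), add_test() and output_result() are assumed to be exposed as in the hunks above, and the file name is made up):

    from magi.common.gradescope import TestCase
    from magi.managers import TestManager

    # Hypothetical driver: reset state, record one case, write the report.
    TestManager.reset()
    case = TestCase(name="q1", max_score=5)
    case.succ("all checks passed")
    TestManager.add_test(case)
    TestManager.output_result("results.json")  # inserts a placeholder case if none were recorded
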
16 changes: 10 additions & 6 deletions plugins/c_cpp/c_cpp.py
@@ -15,13 +15,11 @@ def before_grading(self):
         for dir in Config.ensure_dirs:
             os.makedirs(Directories.WORK_DIR / dir, exist_ok=True)
 
-        if Config.compile_method == CompileMethod.INSTRUCTOR_MAKE:
-            p = subprocess.run(["make"], cwd=Directories.WORK_DIR, capture_output=True)
-        elif Config.compile_method == CompileMethod.STUDENT_MAKE:
-            p = run("make", cwd=Directories.WORK_DIR, capture_output=True)
-        elif Config.compile_method == CompileMethod.AUTO_CMAKE:
+        if Config.compile_method == CompileMethod.AUTO_CMAKE:
             p = subprocess.run(["cmake", "."], cwd=Directories.WORK_DIR, capture_output=True)
+
+        else:
+            p = run("make", cwd=Directories.WORK_DIR, capture_output=True)
 
         if p.returncode != 0:
             TestManager.fail_all(f"Compilation failed with error: {p.stderr.decode()}\n {p.stdout.decode()}")

@@ -33,3 +31,9 @@ def before_generating(self):
             shutil.copy(Config.makefile, Directories.WORK_DIR / "Makefile")
         if Config.provide_student_makefile:
             shutil.copy(Config.makefile, Directories.OUTPUT_DIR / "dist" / "Makefile")
+        if Config.create_file_structure:
+            directories = ["include", "src", "test", "obj", "lib"]
+        if Config.create_file_structure:
+            for dir in directories:
+                os.makedirs(Directories.WORK_DIR / dir, exist_ok=True)

4 changes: 4 additions & 0 deletions plugins/c_cpp/config.py
@@ -24,4 +24,8 @@ class Config:
     makefile: str = field(default="plugins/c_cpp/Makefile",
                           metadata={"excluded_from_ui": True,
                                     "file_editor": "plugins/c_cpp/Makefile"})
+    create_file_structure: bool = field(default=True,
+                                        metadata={"help": "Create a file structure for C/C++ projects",
+                                                  "half_width": True})
+
     ensure_dirs: list[str] = field(default_factory=lambda: ["include", "src", "test", "obj", "lib"])
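
The new flag is read at class level in the plugin (see before_grading and before_generating above), so a course setup could disable it like this (hypothetical snippet; the import path is assumed from the file layout):

    from plugins.c_cpp.config import Config

    # Skip creating the include/src/test/obj/lib skeleton under WORK_DIR.
    Config.create_file_structure = False
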
69 changes: 46 additions & 23 deletions plugins/gtest/grader.py
@@ -10,7 +10,9 @@
 from magi.common import gradescope
 from magi.common.gradescope import TestCase
 from magi.managers import TestManager
+from magi.managers.info_manager import Directories
 from magi.utils import file_utils
+from magi.utils.code_runner import Popen
 
 
 @dataclass
@@ -77,6 +79,8 @@ def generate_overall_stats(self):
         print("Errors: " + str(self.errors))
         print("Disabled: " + str(self.disabled))
 
+    def all_tests_passed(self):
+        return self.tests == self.tests - (self.failures + self.disabled + self.errors)
 
 def write_failed_test(fname: str, testname: str, points: str) -> None:
     """Generates a generic failure XML document which can be parsed in the same way that succeeding
@@ -110,14 +114,13 @@ def grade_all(test_file_name: str) -> None:
     :param str test_file_name: The path to the yaml file describing the tests to execute.
     """
-    from magi.utils.code_runner import Popen
-    from magi.managers.info_manager import Directories
     # First, we're going to read in the test layout from the user defined YAML file
     with open(test_file_name, 'r') as file:
         tests = yaml.load(file, Loader=yaml.FullLoader)
 
     TEMP_DIR = Directories.WORK_DIR / "gtest_temp"
     os.makedirs(TEMP_DIR, exist_ok=True)
     file_utils.reset_dir(TEMP_DIR)
+    os.chmod(TEMP_DIR, 0o777)
 
     for test in tests['tests']:
         # define the output name for the gtest xml file, as gtest can only export in xml, no python API yet
@@ -127,41 +130,61 @@ def grade_all(test_file_name: str) -> None:
         logging.debug(f"Running: {test['name']}")
 
         # In case we are running the autograder again, we want to remove any existing XML files which may be present
-        # if os.path.exists(out_name):
-        #     os.remove(out_name)
         file_utils.remove(out_name)
 
         # Run the individual gtest using the following console command (subprocess is how you can run system commands from python)
         out = ""
         err = ""
 
         gtest_filter = test['class'] + "." + test['name']
         gtest_output = "xml:" + str(out_name)
-        try:
-            p = Popen(["./" + test['file'], f"--gtest_output={gtest_output}", f"--gtest_filter={gtest_filter}"],
-                      stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=Directories.WORK_DIR)
+        test_case = TestCase(name=gtest_filter, max_score=test['points'])
+        test_case.visibility = gradescope.Visibility.VISIBLE if 'visible' not in test or test[
+            'visible'] else gradescope.Visibility.AFTER_PUBLISHED
+
+        try:
+            command = ["./" + test['file'], f"--gtest_output={gtest_output}", f"--gtest_filter={gtest_filter}"]
+            p = Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=Directories.WORK_DIR)
             # Get stdout and stderr
             out, err = p.communicate()
             # gtest outputs 0 if test succeeds, 1 otherwise, 124 if timeout
             logging.debug(f"Test {test['name']} returned {p.returncode}")
             if p.returncode != 0:
-                logging.warning(f"Test {test['name']} returned {p.returncode}")
+                logging.warning(f"Test {test['name']} returned {p.returncode} :{out}\n {err}")
 
         except Exception as e:
             logging.error(f"Error running test {test['name']}: {e}", exc_info=True)
+            # If the xml fails to generate, the test fails to execute (possibly due to segfault)
+            test_case.fail(str(e))
+
+        def fail_test():
+            message = ""
+            if out:
+                message += str(out) + "\n"
+            if err:
+                message += str(err)
+            test_case.fail(message)
+            TestManager.add_test(test_case)
 
         if not os.path.exists(out_name):
             logging.warning(f"Test {test['name']} failed to execute")
             # Write a generic failed XML file so that we can treat it the same as other tests with one function
             write_failed_test(out_name, test['name'], test['points'])
+            fail_test()
+            continue
 
         xml_test_suite = XmlTestSuite(str(out_name))
         xml_test_cases = xml_test_suite.gather_data()
 
-        for xml_test_case in xml_test_cases:
-            test_case = TestCase(name=xml_test_case.name, max_score=test['points'])
-            test_case.visibility = gradescope.Visibility.VISIBLE if 'visible' not in test or test[
-                'visible'] else gradescope.Visibility.AFTER_PUBLISHED
-            if xml_test_case.status == "error":
-                test_case.fail_test(xml_test_case.failed_msg)
-            else:
-                test_case.pass_test(xml_test_case.failed_msg)
-            TestManager.add_test(test_case)
+        if not xml_test_cases:
+            fail_test()
+            continue
+
+        xml_test_case = xml_test_cases[0]
+
+        if xml_test_suite.all_tests_passed():
+            test_case.succ()
+        else:
+            test_case.fail(xml_test_case.failed_msg)
+
+        TestManager.add_test(test_case)

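For reference, grade_all() reads its test layout from YAML; after yaml.load the structure is equivalent to the dict below (keys inferred from the loop above; the suite, binary, and point values are made up):

    # Hypothetical layout consumed by grade_all(); every key shown is read in the loop above.
    tests = {
        "tests": [
            {
                "name": "HandlesZero",      # gtest test name, used in --gtest_filter and logs
                "class": "CalculatorTest",  # gtest suite name, prefix of --gtest_filter
                "file": "calculator_tests", # gtest binary executed from WORK_DIR as ./<file>
                "points": 10,               # becomes max_score of the Gradescope test case
                "visible": False,           # optional; falsy means visible only after publish
            }
        ]
    }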

