diff --git a/openassessment/data.py b/openassessment/data.py index fb8c1823a7..9768c3c492 100644 --- a/openassessment/data.py +++ b/openassessment/data.py @@ -19,6 +19,7 @@ from django.utils.translation import gettext as _ import requests +from submissions.models import Submission from submissions import api as sub_api from submissions.errors import SubmissionNotFoundError from openassessment.runtime_imports.classes import import_block_structure_transformers, import_external_id @@ -27,6 +28,7 @@ from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart from openassessment.fileupload.api import get_download_url from openassessment.workflow.models import AssessmentWorkflow, TeamAssessmentWorkflow +from openassessment.assessment.score_type_constants import PEER_TYPE, SELF_TYPE, STAFF_TYPE logger = logging.getLogger(__name__) @@ -95,6 +97,34 @@ def map_anonymized_ids_to_usernames(anonymized_ids): return anonymous_id_to_username_mapping +def map_anonymized_ids_to_user_data(anonymized_ids): + """ + Args: + anonymized_ids - list of anonymized user ids. + Returns: + dict { + : { + 'email': (str) + 'username': (str) + 'fullname': (str) + } + } + """ + User = get_user_model() + users = _use_read_replica( + User.objects.filter(anonymoususerid__anonymous_user_id__in=anonymized_ids) + .select_related("profile") + .annotate(anonymous_id=F("anonymoususerid__anonymous_user_id")) + ).values("username", "email", "profile__name", "anonymous_id") + + anonymous_id_to_user_info_mapping = { + user["anonymous_id"]: { + "username": user["username"], + "email": user["email"], + "fullname": user["profile__name"] + } for user in users + } + return anonymous_id_to_user_info_mapping class CsvWriter: """ @@ -1535,3 +1565,161 @@ def get_file_uploads(self, missing_blank=False): files.append(file_upload) self.file_uploads = files return self.file_uploads + + +def score_type_to_string(score_type): + """ + Converts the given score type into its string representation. 
+ + Args: + score_type : System representation of the score type. + + Returns: + : string representation of score_type as needed in Staff Grader Template. + """ + SCORE_TYPE_MAP = { + PEER_TYPE: "Peer", + SELF_TYPE: "Self", + STAFF_TYPE: "Staff", + } + return SCORE_TYPE_MAP.get(score_type, "Unknown") + +def parts_summary(assessment_obj): + """ + Retrieves a summary of the parts from a given assessment object. + + Args: + assessment_obj: assessment object. + + Returns: + list[dict]: A list containing assessment parts summary data dictionaries. + """ + return [ + { + 'criterion_name': part.criterion.name, + 'score_earned': part.points_earned, + 'score_type': part.option.name if part.option else "None", + } + for part in assessment_obj.parts.all() + ] + +def get_scorer_data(anonymous_scorer_id, user_data_mapping): + """ + Retrieves the scorer's data (full name, username, and email) based on their anonymous ID. + + Args: + anonymous_scorer_id : Scorer's anonymous_user_id. + user_data_mapping: User data by anonymous_user_id + dict { + : { + 'email': (str) + 'username': (str) + 'fullname': (str) + } + } + + Returns: + fullname, username, email : user data values. + """ + scorer_data = user_data_mapping.get(anonymous_scorer_id, {}) + return ( + scorer_data.get('fullname', ""), + scorer_data.get('username', ""), + scorer_data.get('email', "") + ) + +def generate_assessment_data(assessment_list, user_data_mapping): + """ + Creates the list of Assessment's data dictionaries. + + Args: + assessment_list: assessment objects queryset. + user_data_mapping: User data by anonymous_user_id + dict { + : { + 'email': (str) + 'username': (str) + 'fullname': (str) + } + } + + Returns: + list[dict]: A list containing assessment data dictionaries. 
+ """ + assessment_data_list = [] + for assessment in assessment_list: + + scorer_name, scorer_username, scorer_email = get_scorer_data(assessment.scorer_id, user_data_mapping) + + assessment_data_list.append({ + "assessment_id": str(assessment.id), + "scorer_name": scorer_name, + "scorer_username": scorer_username, + "scorer_email": scorer_email, + "assesment_date": assessment.scored_at.strftime('%d-%m-%Y'), + "assesment_scores": parts_summary(assessment), + "problem_step": score_type_to_string(assessment.score_type), + "feedback": assessment.feedback or '' + }) + return assessment_data_list + +def generate_received_assessment_data(submission_uuid): + """ + Generates a list of received assessments data based on the submission UUID. + + Args: + submission_uuid (str, optional): The UUID of the submission. Defaults to None. + + Returns: + list[dict]: A list containing assessment data dictionaries. + """ + + + submission = sub_api.get_submission_and_student(submission_uuid) + + if not submission: + return [] + + assessments = _use_read_replica( + Assessment.objects.prefetch_related('parts'). + prefetch_related('rubric'). + filter( + submission_uuid=submission['uuid'] + ) + ) + user_data_mapping = map_anonymized_ids_to_user_data([assessment.scorer_id for assessment in assessments]) + return generate_assessment_data(assessments, user_data_mapping) + + +def generate_given_assessment_data(item_id, submission_uuid): + """ + Generates a list of given assessments data based on the submission UUID as scorer. + + Args: + item_id (str): The ID of the item. + submission_uuid (str): The UUID of the submission. + + Returns: + list[dict]: A list containing assessment data dictionaries. 
+ """ + scorer_submission = sub_api.get_submission_and_student(submission_uuid) + + if not scorer_submission: + return [] + + scorer_id = scorer_submission['student_item']['student_id'] + + submissions = Submission.objects.filter(student_item__item_id=item_id).values('uuid') + submission_uuids = [sub['uuid'] for sub in submissions] + + if not submission_uuids or not submissions: + return [] + + assessments_made_by_student = _use_read_replica( + Assessment.objects.prefetch_related('parts') + .prefetch_related('rubric') + .filter(scorer_id=scorer_id, submission_uuid__in=submission_uuids) + ) + + user_data_mapping = map_anonymized_ids_to_user_data([assessment.scorer_id for assessment in assessments_made_by_student]) + return generate_assessment_data(assessments_made_by_student, user_data_mapping) diff --git a/openassessment/staffgrader/serializers/submission_list.py b/openassessment/staffgrader/serializers/submission_list.py index 37c7944eb0..6e90de4d81 100644 --- a/openassessment/staffgrader/serializers/submission_list.py +++ b/openassessment/staffgrader/serializers/submission_list.py @@ -30,7 +30,9 @@ class Meta: 'gradedBy', 'username', 'teamName', - 'score' + 'score', + "email", + "fullname", ] read_only_fields = fields @@ -40,17 +42,23 @@ class Meta: CONTEXT_ANON_ID_TO_USERNAME = 'anonymous_id_to_username' CONTEXT_SUB_TO_ASSESSMENT = 'submission_uuid_to_assessment' CONTEXT_SUB_TO_ANON_ID = 'submission_uuid_to_student_id' + CONTEXT_ANON_ID_TO_EMAIL = "anonymous_id_to_email" + CONTEXT_ANON_ID_TO_FULLNAME = "anonymous_id_to_fullname" def _verify_required_context(self, context): """Verify that required individual or team context is present for serialization""" context_keys = set(context.keys()) # Required context for individual submissions - required_context = set([ - self.CONTEXT_ANON_ID_TO_USERNAME, - self.CONTEXT_SUB_TO_ASSESSMENT, - self.CONTEXT_SUB_TO_ANON_ID - ]) + required_context = set( + [ + self.CONTEXT_ANON_ID_TO_USERNAME, + self.CONTEXT_SUB_TO_ASSESSMENT, 
+ self.CONTEXT_SUB_TO_ANON_ID, + self.CONTEXT_ANON_ID_TO_EMAIL, + self.CONTEXT_ANON_ID_TO_FULLNAME, + ] + ) missing_context = required_context - context_keys if missing_context: @@ -70,6 +78,8 @@ def __init__(self, *args, **kwargs): username = serializers.SerializerMethodField() teamName = serializers.SerializerMethodField() score = serializers.SerializerMethodField() + email = serializers.SerializerMethodField() + fullname = serializers.SerializerMethodField() def _get_username_from_context(self, anonymous_user_id): try: @@ -85,6 +95,22 @@ def _get_anonymous_id_from_context(self, submission_uuid): f"No submitter anonymous user id found for submission uuid {submission_uuid}" ) from e + def _get_email_from_context(self, anonymous_user_id): + try: + return self.context[self.CONTEXT_ANON_ID_TO_EMAIL][anonymous_user_id] + except KeyError as e: + raise MissingContextException( + f"Email not found for anonymous user id {anonymous_user_id}" + ) from e + + def _get_fullname_from_context(self, anonymous_user_id): + try: + return self.context[self.CONTEXT_ANON_ID_TO_FULLNAME][anonymous_user_id] + except KeyError as e: + raise MissingContextException( + f"fullname not found for anonymous user id {anonymous_user_id}" + ) from e + def get_dateGraded(self, workflow): return str(workflow.grading_completed_at) @@ -99,6 +125,16 @@ def get_username(self, workflow): self._get_anonymous_id_from_context(workflow.identifying_uuid) ) + def get_email(self, workflow): + return self._get_email_from_context( + self._get_anonymous_id_from_context(workflow.identifying_uuid) + ) + + def get_fullname(self, workflow): + return self._get_fullname_from_context( + self._get_anonymous_id_from_context(workflow.identifying_uuid) + ) + def get_teamName(self, workflow): # pylint: disable=unused-argument # For individual submissions, this is intentionally empty return None @@ -123,12 +159,16 @@ class TeamSubmissionListSerializer(SubmissionListSerializer): CONTEXT_SUB_TO_ASSESSMENT = 
'submission_uuid_to_assessment' CONTEXT_SUB_TO_TEAM_ID = 'team_submission_uuid_to_team_id' CONTEXT_TEAM_ID_TO_TEAM_NAME = 'team_id_to_team_name' + CONTEXT_ANON_ID_TO_EMAIL = "anonymous_id_to_email" + CONTEXT_ANON_ID_TO_FULLNAME = "anonymous_id_to_fullname" REQUIRED_CONTEXT_KEYS = [ CONTEXT_ANON_ID_TO_USERNAME, CONTEXT_SUB_TO_ASSESSMENT, CONTEXT_SUB_TO_TEAM_ID, CONTEXT_TEAM_ID_TO_TEAM_NAME, + CONTEXT_ANON_ID_TO_EMAIL, + CONTEXT_ANON_ID_TO_FULLNAME, ] def _verify_required_context(self, context): @@ -160,6 +200,15 @@ def get_username(self, workflow): # pylint: disable=unused-argument # For team submissions, this is intentionally empty return None + def get_email(self, workflow): # pylint: disable=unused-argument + # For team submissions, this is intentionally empty + return None + + def get_fullname(self, workflow): # pylint: disable=unused-argument + # For team submissions, this is intentionally empty + return None + + def get_teamName(self, workflow): return self._get_team_name_from_context( self._get_team_id_from_context(workflow.identifying_uuid) diff --git a/openassessment/staffgrader/staff_grader_mixin.py b/openassessment/staffgrader/staff_grader_mixin.py index ae37eec386..b8b7709201 100644 --- a/openassessment/staffgrader/staff_grader_mixin.py +++ b/openassessment/staffgrader/staff_grader_mixin.py @@ -15,7 +15,13 @@ from openassessment.assessment.models.base import Assessment, AssessmentPart from openassessment.assessment.models.staff import StaffWorkflow, TeamStaffWorkflow -from openassessment.data import map_anonymized_ids_to_usernames, OraSubmissionAnswerFactory, VersionNotFoundException +from openassessment.data import ( + OraSubmissionAnswerFactory, + VersionNotFoundException, + map_anonymized_ids_to_user_data, + generate_received_assessment_data, + generate_given_assessment_data +) from openassessment.staffgrader.errors.submission_lock import SubmissionLockContestedError from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock 
from openassessment.staffgrader.serializers import ( @@ -193,6 +199,42 @@ def list_staff_workflows(self, data, suffix=''): # pylint: disable=unused-argum log.exception("Failed to serialize workflow %d: %s", staff_workflow.id, str(e), exc_info=True) return result + + + @XBlock.json_handler + @require_course_staff("STUDENT_GRADE") + def list_assessments(self, data, suffix=''): # pylint: disable=unused-argument + """ + List the assessments' grades based on the type (received or given) for a specific submission. + + Args: + data (dict): Contains the necessary information to fetch the assessments. + - 'item_id': The ID of the xblock/item. + - 'submission_uuid': The UUID of the submission. + - 'assessment_filter': A string, either "received" or any other value + to determine the type of assessments to retrieve. + + Returns: + list[dict]: A list of dictionaries, each representing an assessment's data. + + Note: + - If 'assessment_filter' is "received", the function fetches assessments received + for the given 'submission_uuid'. + - For any other value of 'assessment_filter', the function fetches assessments + given by the owner of the 'submission_uuid' for other submissions in the same item. 
+ """ + item_id = data['item_id'] + subission_uuid = data['submission_uuid'] + filter_value = data['assessment_filter'] + + if filter_value == "received": + return generate_received_assessment_data(subission_uuid) + elif filter_value == "given": + return generate_given_assessment_data(item_id, subission_uuid) + else: + raise ValueError("Invalid assessment_filter value") + + def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assignment=False): """ Fetch additional required data and models to serialize the response @@ -205,7 +247,8 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign workflow_scorer_ids.add(workflow.scorer_id) course_id = self.get_student_item_dict()['course_id'] - # Fetch user identifier mappings + context = {} + if is_team_assignment: # Look up the team IDs for submissions so we can later map to team names team_submission_uuid_to_team_id = get_team_ids_by_team_submission_uuid(submission_uuids) @@ -213,40 +256,39 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign # Look up names for teams topic_id = self.selected_teamset_id team_id_to_team_name = self.teams_service.get_team_names(course_id, topic_id) - - # Do bulk lookup for scorer anonymous ids (submitting team name is a separate lookup) - anonymous_id_to_username = map_anonymized_ids_to_usernames(set(workflow_scorer_ids)) - - context = { + context.update({ 'team_submission_uuid_to_team_id': team_submission_uuid_to_team_id, 'team_id_to_team_name': team_id_to_team_name, - } + }) else: # When we look up usernames we want to include all connected learner student ids submission_uuid_to_student_id = get_student_ids_by_submission_uuid( course_id, submission_uuids, ) + context['submission_uuid_to_student_id'] = submission_uuid_to_student_id - # Do bulk lookup for all anonymous ids (submitters and scoreres). 
This is used for the - # `gradedBy` and `username` fields - anonymous_id_to_username = map_anonymized_ids_to_usernames( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) + all_anonymous_ids = set(workflow_scorer_ids) + if not is_team_assignment: + all_anonymous_ids |= set(context['submission_uuid_to_student_id'].values()) - context = { - 'submission_uuid_to_student_id': submission_uuid_to_student_id, - } + anonymous_id_to_user_data = map_anonymized_ids_to_user_data(all_anonymous_ids) + anonymous_id_to_username = {key: val["username"] for key, val in anonymous_id_to_user_data.items()} + anonymous_id_to_email = {key: val["email"] for key, val in anonymous_id_to_user_data.items()} + anonymous_id_to_fullname = {key: val["fullname"] for key, val in anonymous_id_to_user_data.items()} # Do a bulk fetch of the assessments linked to the workflows, including all connected # Rubric, Criteria, and Option models submission_uuid_to_assessment = self.bulk_deep_fetch_assessments(staff_workflows) - context.update({ - 'anonymous_id_to_username': anonymous_id_to_username, - 'submission_uuid_to_assessment': submission_uuid_to_assessment, - }) - + context.update( + { + "anonymous_id_to_username": anonymous_id_to_username, + "anonymous_id_to_email": anonymous_id_to_email, + "anonymous_id_to_fullname": anonymous_id_to_fullname, + "submission_uuid_to_assessment": submission_uuid_to_assessment, + } + ) return context def _bulk_fetch_annotated_staff_workflows(self, is_team_assignment=False): diff --git a/openassessment/staffgrader/tests/test_list_staff_workflows.py b/openassessment/staffgrader/tests/test_list_staff_workflows.py index 7d2dff05e9..151cb7015a 100644 --- a/openassessment/staffgrader/tests/test_list_staff_workflows.py +++ b/openassessment/staffgrader/tests/test_list_staff_workflows.py @@ -154,6 +154,28 @@ def _mock_map_anonymized_ids_to_usernames(self): ) as patched_map: yield patched_map + @contextmanager + def _mock_map_anonymized_ids_to_user_data(self): 
+        """
+        Context manager that patches map_anonymized_ids_to_user_data with a mapping from
+        student IDs to a dictionary containing username, email, and fullname.
+        """
+        # Build a mock mapping shaped like the real function's return value
+        mock_data = {
+            student_id: {
+                "username": self.student_id_to_username_map.get(student_id, ""),
+                "email": "mocked_email@example.com",  # a per-student email map could be used here if needed
+                "fullname": "Mocked Full Name"  # a per-student fullname map could be used here if needed
+            }
+            for student_id in self.student_id_to_username_map
+        }
+
+        with patch(
+            'openassessment.staffgrader.staff_grader_mixin.map_anonymized_ids_to_user_data',
+            return_value=mock_data
+        ) as patched_map:
+            yield patched_map
+
     def submit_staff_assessment(self, xblock, student, grader, option, option_2=None):
         """
         Helper method to submit a staff assessment
@@ -601,20 +623,18 @@ def test_bulk_fetch_annotated_staff_workflows(self, xblock, set_up_grades, set_u
     def test_get_list_workflows_serializer_context(self, xblock):
         """ Unit test for _get_list_workflows_serializer_context """
         self.set_staff_user(xblock)
-        # Set up the mock return_value for bulk_deep_fetch_assessments. 
-        # submissions 0 and 3 are the only ones assessed
+
         mock_staff_workflows = [
             Mock(scorer_id=self.course_staff[1].student_id),
             Mock(assessment=None, scorer_id=None),
             Mock(assessment=None, scorer_id=None),
             Mock(scorer_id=self.course_staff[2].student_id),
         ]
+
         with self._mock_get_student_ids_by_submission_uuid() as mock_get_student_ids:
-            # with self._mock_get_team_ids_by_team_submission_uuid() as mock_get_team_ids:
-            with self._mock_map_anonymized_ids_to_usernames() as mock_map_ids:
+            with self._mock_map_anonymized_ids_to_user_data() as mock_map_data:  # user-data mock replaces the username-only mock
                 with patch.object(xblock, 'bulk_deep_fetch_assessments') as mock_bulk_fetch_assessments:
-                    # pylint: disable=protected-access
-                    context = xblock._get_list_workflows_serializer_context(mock_staff_workflows)
+                    context = xblock._get_list_workflows_serializer_context(mock_staff_workflows)  # pylint: disable=protected-access
 
         mock_get_student_ids.assert_called_once_with(
             self.course_id,
@@ -627,12 +647,14 @@ def test_get_list_workflows_serializer_context(self, xblock):
         expected_anonymous_id_lookups.update(
             {self.course_staff[1].student_id, self.course_staff[2].student_id}
        )
-        mock_map_ids.assert_called_once_with(expected_anonymous_id_lookups)
+        mock_map_data.assert_called_once_with(expected_anonymous_id_lookups)  # now asserts against the user-data mapper
 
         mock_bulk_fetch_assessments.assert_called_once_with(mock_staff_workflows)
 
         expected_context = {
             'submission_uuid_to_student_id': mock_get_student_ids.return_value,
-            'anonymous_id_to_username': mock_map_ids.return_value,
+            'anonymous_id_to_username': {k: v["username"] for k, v in mock_map_data.return_value.items()},
+            'anonymous_id_to_email': {k: v["email"] for k, v in mock_map_data.return_value.items()},
+            'anonymous_id_to_fullname': {k: v["fullname"] for k, v in mock_map_data.return_value.items()},
             'submission_uuid_to_assessment': mock_bulk_fetch_assessments.return_value,
         }