From 047f1ea664090d6cf91073028f3ae5684a0d8db7 Mon Sep 17 00:00:00 2001 From: Fernando Date: Mon, 16 Oct 2023 11:21:04 -0400 Subject: [PATCH 1/4] feat: endpoint /api/ora_staff_grader/initialize upgraded --- openassessment/data.py | 42 +++++++++++++ .../serializers/submission_list.py | 61 +++++++++++++++++-- .../staffgrader/staff_grader_mixin.py | 33 +++++++--- 3 files changed, 123 insertions(+), 13 deletions(-) diff --git a/openassessment/data.py b/openassessment/data.py index fb8c1823a7..b1ddc7a25c 100644 --- a/openassessment/data.py +++ b/openassessment/data.py @@ -95,6 +95,48 @@ def map_anonymized_ids_to_usernames(anonymized_ids): return anonymous_id_to_username_mapping +def map_anonymized_ids_to_emails(anonymized_ids): + """ + Args: + anonymized_ids - list of anonymized user ids. + Returns: + dictionary, that contains mapping between anonymized user ids and + actual user emails. + """ + User = get_user_model() + + users = _use_read_replica( + User.objects.filter(anonymoususerid__anonymous_user_id__in=anonymized_ids) + .annotate(anonymous_id=F("anonymoususerid__anonymous_user_id")) + .values("email", "anonymous_id") + ) + anonymous_id_to_email_mapping = { + user["anonymous_id"]: user["email"] for user in users + } + return anonymous_id_to_email_mapping + + +def map_anonymized_ids_to_fullname(anonymized_ids): + """ + Args: + anonymized_ids - list of anonymized user ids. + Returns: + dictionary, that contains mapping between anonymized user ids and + actual user fullname. 
+ """ + User = get_user_model() + + users = _use_read_replica( + User.objects.filter(anonymoususerid__anonymous_user_id__in=anonymized_ids) + .select_related("profile") + .annotate(anonymous_id=F("anonymoususerid__anonymous_user_id")) + .values("profile__name", "anonymous_id") + ) + + anonymous_id_to_fullname_mapping = { + user["anonymous_id"]: user["profile__name"] for user in users + } + return anonymous_id_to_fullname_mapping class CsvWriter: """ diff --git a/openassessment/staffgrader/serializers/submission_list.py b/openassessment/staffgrader/serializers/submission_list.py index 37c7944eb0..6e90de4d81 100644 --- a/openassessment/staffgrader/serializers/submission_list.py +++ b/openassessment/staffgrader/serializers/submission_list.py @@ -30,7 +30,9 @@ class Meta: 'gradedBy', 'username', 'teamName', - 'score' + 'score', + "email", + "fullname", ] read_only_fields = fields @@ -40,17 +42,23 @@ class Meta: CONTEXT_ANON_ID_TO_USERNAME = 'anonymous_id_to_username' CONTEXT_SUB_TO_ASSESSMENT = 'submission_uuid_to_assessment' CONTEXT_SUB_TO_ANON_ID = 'submission_uuid_to_student_id' + CONTEXT_ANON_ID_TO_EMAIL = "anonymous_id_to_email" + CONTEXT_ANON_ID_TO_FULLNAME = "anonymous_id_to_fullname" def _verify_required_context(self, context): """Verify that required individual or team context is present for serialization""" context_keys = set(context.keys()) # Required context for individual submissions - required_context = set([ - self.CONTEXT_ANON_ID_TO_USERNAME, - self.CONTEXT_SUB_TO_ASSESSMENT, - self.CONTEXT_SUB_TO_ANON_ID - ]) + required_context = set( + [ + self.CONTEXT_ANON_ID_TO_USERNAME, + self.CONTEXT_SUB_TO_ASSESSMENT, + self.CONTEXT_SUB_TO_ANON_ID, + self.CONTEXT_ANON_ID_TO_EMAIL, + self.CONTEXT_ANON_ID_TO_FULLNAME, + ] + ) missing_context = required_context - context_keys if missing_context: @@ -70,6 +78,8 @@ def __init__(self, *args, **kwargs): username = serializers.SerializerMethodField() teamName = serializers.SerializerMethodField() score = 
serializers.SerializerMethodField() + email = serializers.SerializerMethodField() + fullname = serializers.SerializerMethodField() def _get_username_from_context(self, anonymous_user_id): try: @@ -85,6 +95,22 @@ def _get_anonymous_id_from_context(self, submission_uuid): f"No submitter anonymous user id found for submission uuid {submission_uuid}" ) from e + def _get_email_from_context(self, anonymous_user_id): + try: + return self.context[self.CONTEXT_ANON_ID_TO_EMAIL][anonymous_user_id] + except KeyError as e: + raise MissingContextException( + f"Email not found for anonymous user id {anonymous_user_id}" + ) from e + + def _get_fullname_from_context(self, anonymous_user_id): + try: + return self.context[self.CONTEXT_ANON_ID_TO_FULLNAME][anonymous_user_id] + except KeyError as e: + raise MissingContextException( + f"fullname not found for anonymous user id {anonymous_user_id}" + ) from e + def get_dateGraded(self, workflow): return str(workflow.grading_completed_at) @@ -99,6 +125,16 @@ def get_username(self, workflow): self._get_anonymous_id_from_context(workflow.identifying_uuid) ) + def get_email(self, workflow): + return self._get_email_from_context( + self._get_anonymous_id_from_context(workflow.identifying_uuid) + ) + + def get_fullname(self, workflow): + return self._get_fullname_from_context( + self._get_anonymous_id_from_context(workflow.identifying_uuid) + ) + def get_teamName(self, workflow): # pylint: disable=unused-argument # For individual submissions, this is intentionally empty return None @@ -123,12 +159,16 @@ class TeamSubmissionListSerializer(SubmissionListSerializer): CONTEXT_SUB_TO_ASSESSMENT = 'submission_uuid_to_assessment' CONTEXT_SUB_TO_TEAM_ID = 'team_submission_uuid_to_team_id' CONTEXT_TEAM_ID_TO_TEAM_NAME = 'team_id_to_team_name' + CONTEXT_ANON_ID_TO_EMAIL = "anonymous_id_to_email" + CONTEXT_ANON_ID_TO_FULLNAME = "anonymous_id_to_fullname" REQUIRED_CONTEXT_KEYS = [ CONTEXT_ANON_ID_TO_USERNAME, CONTEXT_SUB_TO_ASSESSMENT, 
CONTEXT_SUB_TO_TEAM_ID, CONTEXT_TEAM_ID_TO_TEAM_NAME, + CONTEXT_ANON_ID_TO_EMAIL, + CONTEXT_ANON_ID_TO_FULLNAME, ] def _verify_required_context(self, context): @@ -160,6 +200,15 @@ def get_username(self, workflow): # pylint: disable=unused-argument # For team submissions, this is intentionally empty return None + def get_email(self, workflow): # pylint: disable=unused-argument + # For team submissions, this is intentionally empty + return None + + def get_fullname(self, workflow): # pylint: disable=unused-argument + # For team submissions, this is intentionally empty + return None + + def get_teamName(self, workflow): return self._get_team_name_from_context( self._get_team_id_from_context(workflow.identifying_uuid) diff --git a/openassessment/staffgrader/staff_grader_mixin.py b/openassessment/staffgrader/staff_grader_mixin.py index ae37eec386..639e3ddda1 100644 --- a/openassessment/staffgrader/staff_grader_mixin.py +++ b/openassessment/staffgrader/staff_grader_mixin.py @@ -15,7 +15,8 @@ from openassessment.assessment.models.base import Assessment, AssessmentPart from openassessment.assessment.models.staff import StaffWorkflow, TeamStaffWorkflow -from openassessment.data import map_anonymized_ids_to_usernames, OraSubmissionAnswerFactory, VersionNotFoundException +from openassessment.data import (OraSubmissionAnswerFactory, VersionNotFoundException, map_anonymized_ids_to_emails, + map_anonymized_ids_to_fullname, map_anonymized_ids_to_usernames) from openassessment.staffgrader.errors.submission_lock import SubmissionLockContestedError from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock from openassessment.staffgrader.serializers import ( @@ -215,7 +216,15 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign team_id_to_team_name = self.teams_service.get_team_names(course_id, topic_id) # Do bulk lookup for scorer anonymous ids (submitting team name is a separate lookup) - anonymous_id_to_username = 
map_anonymized_ids_to_usernames(set(workflow_scorer_ids)) + anonymous_id_to_username = map_anonymized_ids_to_usernames( + set(workflow_scorer_ids) + ) + anonymous_id_to_email = map_anonymized_ids_to_emails( + set(workflow_scorer_ids) + ) + anonymous_id_to_fullname = map_anonymized_ids_to_fullname( + set(workflow_scorer_ids) + ) context = { 'team_submission_uuid_to_team_id': team_submission_uuid_to_team_id, @@ -233,6 +242,13 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign anonymous_id_to_username = map_anonymized_ids_to_usernames( set(submission_uuid_to_student_id.values()) | workflow_scorer_ids ) + anonymous_id_to_email = map_anonymized_ids_to_emails( + set(submission_uuid_to_student_id.values()) | workflow_scorer_ids + ) + + anonymous_id_to_fullname = map_anonymized_ids_to_fullname( + set(submission_uuid_to_student_id.values()) | workflow_scorer_ids + ) context = { 'submission_uuid_to_student_id': submission_uuid_to_student_id, @@ -242,11 +258,14 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign # Rubric, Criteria, and Option models submission_uuid_to_assessment = self.bulk_deep_fetch_assessments(staff_workflows) - context.update({ - 'anonymous_id_to_username': anonymous_id_to_username, - 'submission_uuid_to_assessment': submission_uuid_to_assessment, - }) - + context.update( + { + "anonymous_id_to_username": anonymous_id_to_username, + "anonymous_id_to_email": anonymous_id_to_email, + "anonymous_id_to_fullname": anonymous_id_to_fullname, + "submission_uuid_to_assessment": submission_uuid_to_assessment, + } + ) return context def _bulk_fetch_annotated_staff_workflows(self, is_team_assignment=False): From d0d9b0231941c9eb9026b4105f012c17caa0e4c5 Mon Sep 17 00:00:00 2001 From: Fernando Date: Wed, 18 Oct 2023 17:31:16 -0400 Subject: [PATCH 2/4] feat: generate_assessment_data handler created --- openassessment/data.py | 121 ++++++++++++++++++ .../staffgrader/staff_grader_mixin.py | 35 ++++- 2 files 
changed, 155 insertions(+), 1 deletion(-) diff --git a/openassessment/data.py b/openassessment/data.py index b1ddc7a25c..c6593a8746 100644 --- a/openassessment/data.py +++ b/openassessment/data.py @@ -19,6 +19,7 @@ from django.utils.translation import gettext as _ import requests +from submissions.models import Submission from submissions import api as sub_api from submissions.errors import SubmissionNotFoundError from openassessment.runtime_imports.classes import import_block_structure_transformers, import_external_id @@ -27,6 +28,7 @@ from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart from openassessment.fileupload.api import get_download_url from openassessment.workflow.models import AssessmentWorkflow, TeamAssessmentWorkflow +from openassessment.assessment.score_type_constants import PEER_TYPE, SELF_TYPE, STAFF_TYPE logger = logging.getLogger(__name__) @@ -1577,3 +1579,122 @@ def get_file_uploads(self, missing_blank=False): files.append(file_upload) self.file_uploads = files return self.file_uploads + + +def score_type_to_string(score_type): + """ + Converts the given score type into its string representation. + """ + SCORE_TYPE_MAP = { + PEER_TYPE: "Peer", + SELF_TYPE: "Self", + STAFF_TYPE: "Staff", + } + return SCORE_TYPE_MAP.get(score_type, "Unknown") + +def parts_summary(assessment_obj): + """ + Retrieves a summary of the parts from a given assessment object. + """ + return [ + { + 'type': part.criterion.name, + 'score': part.points_earned, + 'score_type': part.option.name if part.option else "None", + } + for part in assessment_obj.parts.all() + ] + +def get_scorer_data(anonymous_scorer_id): + """ + Retrieves the grader's data (full name, username, and email) based on their anonymous ID. 
+ """ + scorer_username = map_anonymized_ids_to_usernames([anonymous_scorer_id]).get(anonymous_scorer_id, "Unknown") + scorer_name = map_anonymized_ids_to_fullname([anonymous_scorer_id]).get(anonymous_scorer_id, "Unknown") + scorer_email = map_anonymized_ids_to_emails([anonymous_scorer_id]).get(anonymous_scorer_id, "Unknown") + return scorer_name, scorer_username, scorer_email + +def generate_assessment_data(assessment_list): + results = [] + for assessment in assessment_list: + + scorer_name, scorer_username, scorer_email = get_scorer_data(assessment.scorer_id) + + assessment_data = { + "idAssessment": str(assessment.id), + "grader_name": scorer_name, + "grader_username": scorer_username, + "grader_email": scorer_email, + "assesmentDate": assessment.scored_at.strftime('%d-%m-%Y'), + "assesmentScores": parts_summary(assessment), + "problemStep": score_type_to_string(assessment.score_type), + "feedback": assessment.feedback or '' + } + + results.append(assessment_data) + return results + +def generate_received_assessment_data(submission_uuid=None): + """ + Generates a list of received assessments data based on the submission UUID. + + Args: + submission_uuid (str, optional): The UUID of the submission. Defaults to None. + + Returns: + list[dict]: A list containing assessment data dictionaries. + """ + + results = [] + + submission = None + if submission_uuid: + submission = sub_api.get_submission_and_student(submission_uuid) + + if not submission: + return results + + assessments = _use_read_replica( + Assessment.objects.prefetch_related('parts'). + prefetch_related('rubric'). + filter( + submission_uuid=submission['uuid'] + ) + ) + return generate_assessment_data(assessments) + + +def generate_given_assessment_data(item_id=None, submission_uuid=None): + """ + Generates a list of given assessments data based on the submission UUID as scorer. + + Args: + submission_uuid (str, optional): The UUID of the submission. Defaults to None. 
+ + Returns: + list[dict]: A list containing assessment data dictionaries. + """ + results = [] + # Getting the scorer student id + primary_submission = sub_api.get_submission_and_student(submission_uuid) + + if not primary_submission: + return results + + scorer_student_id = primary_submission['student_item']['student_id'] + submissions = None + if item_id: + submissions = Submission.objects.filter(student_item__item_id=item_id).values('uuid') + submission_uuids = [sub['uuid'] for sub in submissions] + + if not submission_uuids or not submissions: + return results + + # Now fetch all assessments made by this student for these submissions + assessments_made_by_student = _use_read_replica( + Assessment.objects.prefetch_related('parts') + .prefetch_related('rubric') + .filter(scorer_id=scorer_student_id, submission_uuid__in=submission_uuids) + ) + + return generate_assessment_data(assessments_made_by_student) \ No newline at end of file diff --git a/openassessment/staffgrader/staff_grader_mixin.py b/openassessment/staffgrader/staff_grader_mixin.py index 639e3ddda1..3895b58219 100644 --- a/openassessment/staffgrader/staff_grader_mixin.py +++ b/openassessment/staffgrader/staff_grader_mixin.py @@ -16,7 +16,7 @@ from openassessment.assessment.models.base import Assessment, AssessmentPart from openassessment.assessment.models.staff import StaffWorkflow, TeamStaffWorkflow from openassessment.data import (OraSubmissionAnswerFactory, VersionNotFoundException, map_anonymized_ids_to_emails, - map_anonymized_ids_to_fullname, map_anonymized_ids_to_usernames) + map_anonymized_ids_to_fullname, map_anonymized_ids_to_usernames, generate_received_assessment_data, generate_given_assessment_data) from openassessment.staffgrader.errors.submission_lock import SubmissionLockContestedError from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock from openassessment.staffgrader.serializers import ( @@ -194,6 +194,39 @@ def list_staff_workflows(self, data, 
suffix=''): # pylint: disable=unused-argum log.exception("Failed to serialize workflow %d: %s", staff_workflow.id, str(e), exc_info=True) return result + + + @XBlock.json_handler + @require_course_staff("STUDENT_GRADE") + def list_assessments_grades(self, data, suffix=''): # pylint: disable=unused-argument + """ + List the assessments' grades based on the type (received or given) for a specific submission. + + Args: + data (dict): Contains the necessary information to fetch the assessments. + - 'item_id': The ID of the xblock/item. + - 'submission_uuid': The UUID of the submission. + - 'assessment_type': A string, either "received" or any other value + to determine the type of assessments to retrieve. + + Returns: + list[dict]: A list of dictionaries, each representing an assessment's data. + + Note: + - If 'assessment_type' is "received", the function fetches assessments received + for the given 'submission_uuid'. + - For any other value of 'assessment_type', the function fetches assessments + given by the owner of the 'submission_uuid' for other submissions in the same item. + """ + item_id = data['item_id'] + subission_uuid = data['submission_uuid'] + + if data['assessment_type'] == "received": + return generate_received_assessment_data(subission_uuid) + else: + return generate_given_assessment_data(item_id, subission_uuid) + + def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assignment=False): """ Fetch additional required data and models to serialize the response From 9e5b92b272ae13a7de2984f674b992313669cd7d Mon Sep 17 00:00:00 2001 From: Fernando Date: Sat, 21 Oct 2023 16:15:39 -0400 Subject: [PATCH 3/4] refactor: map_anonymized_ids_to_user_data for optimized user data retrieval. 
def map_anonymized_ids_to_user_data(anonymized_ids):
    """
    Map anonymized user ids to basic user data in one bulk query.

    Args:
        anonymized_ids: iterable of anonymized user ids.

    Returns:
        dict {
            <anonymous_user_id>: {
                'username': (str),
                'email': (str),
                'fullname': (str),
            }
        }
    """
    User = get_user_model()
    # NOTE: .select_related("profile") was dropped — it is redundant here
    # because .values() generates the profile join it needs on its own.
    users = _use_read_replica(
        User.objects.filter(anonymoususerid__anonymous_user_id__in=anonymized_ids)
        .annotate(anonymous_id=F("anonymoususerid__anonymous_user_id"))
    ).values("username", "email", "profile__name", "anonymous_id")

    return {
        user["anonymous_id"]: {
            "username": user["username"],
            "email": user["email"],
            "fullname": user["profile__name"],
        }
        for user in users
    }
""" - scorer_username = map_anonymized_ids_to_usernames([anonymous_scorer_id]).get(anonymous_scorer_id, "Unknown") - scorer_name = map_anonymized_ids_to_fullname([anonymous_scorer_id]).get(anonymous_scorer_id, "Unknown") - scorer_email = map_anonymized_ids_to_emails([anonymous_scorer_id]).get(anonymous_scorer_id, "Unknown") + scorer_data = user_data_mapping.get(anonymous_scorer_id, {}) + scorer_name = scorer_data.get('fullname', "Unknown") + scorer_username = scorer_data.get('username', "Unknown") + scorer_email = scorer_data.get('email', "Unknown") return scorer_name, scorer_username, scorer_email -def generate_assessment_data(assessment_list): +def generate_assessment_data(assessment_list, user_data_mapping): results = [] for assessment in assessment_list: - scorer_name, scorer_username, scorer_email = get_scorer_data(assessment.scorer_id) + scorer_name, scorer_username, scorer_email = get_scorer_data(assessment.scorer_id, user_data_mapping) assessment_data = { - "idAssessment": str(assessment.id), - "grader_name": scorer_name, - "grader_username": scorer_username, - "grader_email": scorer_email, - "assesmentDate": assessment.scored_at.strftime('%d-%m-%Y'), - "assesmentScores": parts_summary(assessment), - "problemStep": score_type_to_string(assessment.score_type), + "id_assessment": str(assessment.id), + "scorer_name": scorer_name, + "scorer_username": scorer_username, + "scorer_email": scorer_email, + "assesment_date": assessment.scored_at.strftime('%d-%m-%Y'), + "assesment_scores": parts_summary(assessment), + "problem_step": score_type_to_string(assessment.score_type), "feedback": assessment.feedback or '' } @@ -1661,7 +1648,8 @@ def generate_received_assessment_data(submission_uuid=None): submission_uuid=submission['uuid'] ) ) - return generate_assessment_data(assessments) + user_data_mapping = map_anonymized_ids_to_user_data([assessment.scorer_id for assessment in assessments]) + return generate_assessment_data(assessments, user_data_mapping) def 
generate_given_assessment_data(item_id=None, submission_uuid=None): @@ -1676,12 +1664,12 @@ def generate_given_assessment_data(item_id=None, submission_uuid=None): """ results = [] # Getting the scorer student id - primary_submission = sub_api.get_submission_and_student(submission_uuid) + scorer_submission = sub_api.get_submission_and_student(submission_uuid) - if not primary_submission: + if not scorer_submission: return results - scorer_student_id = primary_submission['student_item']['student_id'] + scorer_id = scorer_submission['student_item']['student_id'] submissions = None if item_id: submissions = Submission.objects.filter(student_item__item_id=item_id).values('uuid') @@ -1694,7 +1682,8 @@ def generate_given_assessment_data(item_id=None, submission_uuid=None): assessments_made_by_student = _use_read_replica( Assessment.objects.prefetch_related('parts') .prefetch_related('rubric') - .filter(scorer_id=scorer_student_id, submission_uuid__in=submission_uuids) + .filter(scorer_id=scorer_id, submission_uuid__in=submission_uuids) ) - return generate_assessment_data(assessments_made_by_student) \ No newline at end of file + user_data_mapping = map_anonymized_ids_to_user_data([assessment.scorer_id for assessment in assessments_made_by_student]) + return generate_assessment_data(assessments_made_by_student, user_data_mapping) \ No newline at end of file diff --git a/openassessment/staffgrader/staff_grader_mixin.py b/openassessment/staffgrader/staff_grader_mixin.py index 3895b58219..cc3b001611 100644 --- a/openassessment/staffgrader/staff_grader_mixin.py +++ b/openassessment/staffgrader/staff_grader_mixin.py @@ -15,8 +15,13 @@ from openassessment.assessment.models.base import Assessment, AssessmentPart from openassessment.assessment.models.staff import StaffWorkflow, TeamStaffWorkflow -from openassessment.data import (OraSubmissionAnswerFactory, VersionNotFoundException, map_anonymized_ids_to_emails, - map_anonymized_ids_to_fullname, map_anonymized_ids_to_usernames, 
generate_received_assessment_data, generate_given_assessment_data) +from openassessment.data import ( + OraSubmissionAnswerFactory, + VersionNotFoundException, + map_anonymized_ids_to_user_data, + generate_received_assessment_data, + generate_given_assessment_data +) from openassessment.staffgrader.errors.submission_lock import SubmissionLockContestedError from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock from openassessment.staffgrader.serializers import ( @@ -198,7 +203,7 @@ def list_staff_workflows(self, data, suffix=''): # pylint: disable=unused-argum @XBlock.json_handler @require_course_staff("STUDENT_GRADE") - def list_assessments_grades(self, data, suffix=''): # pylint: disable=unused-argument + def list_assessments(self, data, suffix=''): # pylint: disable=unused-argument """ List the assessments' grades based on the type (received or given) for a specific submission. @@ -239,7 +244,8 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign workflow_scorer_ids.add(workflow.scorer_id) course_id = self.get_student_item_dict()['course_id'] - # Fetch user identifier mappings + context = {} + if is_team_assignment: # Look up the team IDs for submissions so we can later map to team names team_submission_uuid_to_team_id = get_team_ids_by_team_submission_uuid(submission_uuids) @@ -247,45 +253,26 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign # Look up names for teams topic_id = self.selected_teamset_id team_id_to_team_name = self.teams_service.get_team_names(course_id, topic_id) - - # Do bulk lookup for scorer anonymous ids (submitting team name is a separate lookup) - anonymous_id_to_username = map_anonymized_ids_to_usernames( - set(workflow_scorer_ids) - ) - anonymous_id_to_email = map_anonymized_ids_to_emails( - set(workflow_scorer_ids) - ) - anonymous_id_to_fullname = map_anonymized_ids_to_fullname( - set(workflow_scorer_ids) - ) - - context = { + context.update({ 
'team_submission_uuid_to_team_id': team_submission_uuid_to_team_id, 'team_id_to_team_name': team_id_to_team_name, - } + }) else: # When we look up usernames we want to include all connected learner student ids submission_uuid_to_student_id = get_student_ids_by_submission_uuid( course_id, submission_uuids, ) + context['submission_uuid_to_student_id'] = submission_uuid_to_student_id - # Do bulk lookup for all anonymous ids (submitters and scoreres). This is used for the - # `gradedBy` and `username` fields - anonymous_id_to_username = map_anonymized_ids_to_usernames( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) - anonymous_id_to_email = map_anonymized_ids_to_emails( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) - - anonymous_id_to_fullname = map_anonymized_ids_to_fullname( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) + all_anonymous_ids = set(workflow_scorer_ids) + if not is_team_assignment: + all_anonymous_ids |= set(context['submission_uuid_to_student_id'].values()) - context = { - 'submission_uuid_to_student_id': submission_uuid_to_student_id, - } + anonymous_id_to_user_data = map_anonymized_ids_to_user_data(all_anonymous_ids) + anonymous_id_to_username = {key: val["username"] for key, val in anonymous_id_to_user_data.items()} + anonymous_id_to_email = {key: val["email"] for key, val in anonymous_id_to_user_data.items()} + anonymous_id_to_fullname = {key: val["fullname"] for key, val in anonymous_id_to_user_data.items()} # Do a bulk fetch of the assessments linked to the workflows, including all connected # Rubric, Criteria, and Option models diff --git a/openassessment/staffgrader/tests/test_list_staff_workflows.py b/openassessment/staffgrader/tests/test_list_staff_workflows.py index 7d2dff05e9..151cb7015a 100644 --- a/openassessment/staffgrader/tests/test_list_staff_workflows.py +++ b/openassessment/staffgrader/tests/test_list_staff_workflows.py @@ -154,6 +154,28 @@ def 
_mock_map_anonymized_ids_to_usernames(self): ) as patched_map: yield patched_map + @contextmanager + def _mock_map_anonymized_ids_to_user_data(self): + """ + Context manager that patches map_anonymized_ids_to_user_data and returns a mapping from student IDs to a dictionary + containing username, email, and fullname. + """ + # Creamos un diccionario simulado que represente lo que la función real devuelve + mock_data = { + student_id: { + "username": self.student_id_to_username_map.get(student_id, ""), + "email": "mocked_email@example.com", # O quizás puedes usar un mapa similar para emails si es necesario + "fullname": "Mocked Full Name" # O quizás puedes usar un mapa similar para nombres completos si es necesario + } + for student_id in self.student_id_to_username_map + } + + with patch( + 'openassessment.staffgrader.staff_grader_mixin.map_anonymized_ids_to_user_data', + return_value=mock_data + ) as patched_map: + yield patched_map + def submit_staff_assessment(self, xblock, student, grader, option, option_2=None): """ Helper method to submit a staff assessment @@ -601,20 +623,18 @@ def test_bulk_fetch_annotated_staff_workflows(self, xblock, set_up_grades, set_u def test_get_list_workflows_serializer_context(self, xblock): """ Unit test for _get_list_workflows_serializer_context """ self.set_staff_user(xblock) - # Set up the mock return_value for bulk_deep_fetch_assessments. 
- # submissions 0 and 3 are the only ones assessed + mock_staff_workflows = [ Mock(scorer_id=self.course_staff[1].student_id), Mock(assessment=None, scorer_id=None), Mock(assessment=None, scorer_id=None), Mock(scorer_id=self.course_staff[2].student_id), ] + with self._mock_get_student_ids_by_submission_uuid() as mock_get_student_ids: - # with self._mock_get_team_ids_by_team_submission_uuid() as mock_get_team_ids: - with self._mock_map_anonymized_ids_to_usernames() as mock_map_ids: + with self._mock_map_anonymized_ids_to_user_data() as mock_map_data: # Cambio importante aquí with patch.object(xblock, 'bulk_deep_fetch_assessments') as mock_bulk_fetch_assessments: - # pylint: disable=protected-access - context = xblock._get_list_workflows_serializer_context(mock_staff_workflows) + context = xblock._get_list_workflows_serializer_context(mock_staff_workflows) # pylint: disable=protected-access mock_get_student_ids.assert_called_once_with( self.course_id, @@ -627,12 +647,14 @@ def test_get_list_workflows_serializer_context(self, xblock): expected_anonymous_id_lookups.update( {self.course_staff[1].student_id, self.course_staff[2].student_id} ) - mock_map_ids.assert_called_once_with(expected_anonymous_id_lookups) + mock_map_data.assert_called_once_with(expected_anonymous_id_lookups) # Cambio importante aquí mock_bulk_fetch_assessments.assert_called_once_with(mock_staff_workflows) expected_context = { 'submission_uuid_to_student_id': mock_get_student_ids.return_value, - 'anonymous_id_to_username': mock_map_ids.return_value, + 'anonymous_id_to_username': {k: v["username"] for k, v in mock_map_data.return_value.items()}, + 'anonymous_id_to_email': {k: v["email"] for k, v in mock_map_data.return_value.items()}, + 'anonymous_id_to_fullname': {k: v["fullname"] for k, v in mock_map_data.return_value.items()}, 'submission_uuid_to_assessment': mock_bulk_fetch_assessments.return_value, } From 6c9079d157afeda20021f702f275911d55c00312 Mon Sep 17 00:00:00 2001 From: Fernando Date: Tue, 
# Human-readable labels for assessment score types, as shown by the
# Staff Grader template. Hoisted to module level so the mapping is not
# rebuilt on every call.
SCORE_TYPE_LABELS = {
    PEER_TYPE: "Peer",
    SELF_TYPE: "Self",
    STAFF_TYPE: "Staff",
}


def score_type_to_string(score_type):
    """
    Convert the given score type into its string representation.

    Args:
        score_type: system representation of the score type.

    Returns:
        str: label used in the Staff Grader template; "Unknown" for
        unmapped score types.
    """
    return SCORE_TYPE_LABELS.get(score_type, "Unknown")


def parts_summary(assessment_obj):
    """
    Retrieve a summary of the parts of a given assessment.

    Args:
        assessment_obj: assessment object with related parts.

    Returns:
        list[dict]: one dict per part with the criterion name, the points
        earned, and the selected option's name ("None" when no option
        was selected).
    """
    return [
        {
            'criterion_name': part.criterion.name,
            'score_earned': part.points_earned,
            # NOTE(review): the key name is historical — the value is the
            # option's name, not a score type. Renaming it would break
            # consumers, so it is kept as-is.
            'score_type': part.option.name if part.option else "None",
        }
        for part in assessment_obj.parts.all()
    ]
""" scorer_data = user_data_mapping.get(anonymous_scorer_id, {}) - scorer_name = scorer_data.get('fullname', "Unknown") - scorer_username = scorer_data.get('username', "Unknown") - scorer_email = scorer_data.get('email', "Unknown") - return scorer_name, scorer_username, scorer_email + return ( + scorer_data.get('fullname', ""), + scorer_data.get('username', ""), + scorer_data.get('email', "") + ) def generate_assessment_data(assessment_list, user_data_mapping): - results = [] + """ + Creates the list of Assessment's data dictionaries. + + Args: + assessment_list: assessment objects queryset. + user_data_mapping: User data by anonymous_user_id + dict { + : { + 'email': (str) + 'username': (str) + 'fullname': (str) + } + } + + Returns: + list[dict]: A list containing assessment data dictionaries. + """ + assessment_data_list = [] for assessment in assessment_list: scorer_name, scorer_username, scorer_email = get_scorer_data(assessment.scorer_id, user_data_mapping) - assessment_data = { - "id_assessment": str(assessment.id), + assessment_data_list.append({ + "assessment_id": str(assessment.id), "scorer_name": scorer_name, "scorer_username": scorer_username, "scorer_email": scorer_email, @@ -1616,12 +1660,10 @@ def generate_assessment_data(assessment_list, user_data_mapping): "assesment_scores": parts_summary(assessment), "problem_step": score_type_to_string(assessment.score_type), "feedback": assessment.feedback or '' - } + }) + return assessment_data_list - results.append(assessment_data) - return results - -def generate_received_assessment_data(submission_uuid=None): +def generate_received_assessment_data(submission_uuid): """ Generates a list of received assessments data based on the submission UUID. @@ -1632,14 +1674,11 @@ def generate_received_assessment_data(submission_uuid=None): list[dict]: A list containing assessment data dictionaries. 
""" - results = [] - submission = None - if submission_uuid: - submission = sub_api.get_submission_and_student(submission_uuid) + submission = sub_api.get_submission_and_student(submission_uuid) if not submission: - return results + return [] assessments = _use_read_replica( Assessment.objects.prefetch_related('parts'). @@ -1652,33 +1691,30 @@ def generate_received_assessment_data(submission_uuid=None): return generate_assessment_data(assessments, user_data_mapping) -def generate_given_assessment_data(item_id=None, submission_uuid=None): +def generate_given_assessment_data(item_id, submission_uuid): """ Generates a list of given assessments data based on the submission UUID as scorer. Args: - submission_uuid (str, optional): The UUID of the submission. Defaults to None. + item_id (str): The ID of the item. + submission_uuid (str): The UUID of the submission. Returns: list[dict]: A list containing assessment data dictionaries. """ - results = [] - # Getting the scorer student id scorer_submission = sub_api.get_submission_and_student(submission_uuid) if not scorer_submission: - return results + return [] scorer_id = scorer_submission['student_item']['student_id'] - submissions = None - if item_id: - submissions = Submission.objects.filter(student_item__item_id=item_id).values('uuid') - submission_uuids = [sub['uuid'] for sub in submissions] + + submissions = Submission.objects.filter(student_item__item_id=item_id).values('uuid') + submission_uuids = [sub['uuid'] for sub in submissions] if not submission_uuids or not submissions: - return results + return [] - # Now fetch all assessments made by this student for these submissions assessments_made_by_student = _use_read_replica( Assessment.objects.prefetch_related('parts') .prefetch_related('rubric') @@ -1686,4 +1722,4 @@ def generate_given_assessment_data(item_id=None, submission_uuid=None): ) user_data_mapping = map_anonymized_ids_to_user_data([assessment.scorer_id for assessment in assessments_made_by_student]) - 
def list_assessments(self, data, suffix=''):  # pylint: disable=unused-argument
    """
    Return assessment data for the staff grader's assessment-list view.

    Args:
        data (dict): Contains the necessary information to fetch the assessments.
            - 'item_id': The ID of the xblock/item.
            - 'submission_uuid': The UUID of the submission.
            - 'assessment_filter': Either "received" or "given", selecting
              which set of assessments to retrieve.
        suffix (str): Unused handler suffix.

    Returns:
        list[dict]: A list of dictionaries, each representing an assessment's data.

    Raises:
        ValueError: If 'assessment_filter' is neither "received" nor "given".

    Note:
        - "received" fetches assessments received for 'submission_uuid'.
        - "given" fetches assessments the owner of 'submission_uuid' gave to
          other submissions in the same item.
    """
    item_id = data['item_id']
    # Fixed local-variable typo: was "subission_uuid".
    submission_uuid = data['submission_uuid']
    filter_value = data['assessment_filter']

    if filter_value == "received":
        return generate_received_assessment_data(submission_uuid)
    if filter_value == "given":
        return generate_given_assessment_data(item_id, submission_uuid)
    raise ValueError("Invalid assessment_filter value")