Merged
7 changes: 7 additions & 0 deletions cms/templates/js/show-correctness-editor.underscore
Expand Up @@ -35,6 +35,13 @@
<% } %>
<%- gettext('If the subsection does not have a due date, learners always see their scores when they submit answers to assessments.') %>
</p>
<label class="label">
<input class="input input-radio" name="show-correctness" type="radio" value="never_but_include_grade" aria-describedby="never_show_correctness_but_include_grade_description" />
<%- gettext('Never show individual assessment results, but show overall assessment results after due date') %>
</label>
<p class='field-message' id='never_show_correctness_but_include_grade_description'>
<%- gettext('Learners do not see question-level correctness or scores before or after the due date. However, once the due date passes, they can see their overall score for the subsection on the Progress page.') %>
</p>
</div>
</div>
</form>
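For orientation: the new radio value corresponds to the ShowCorrectness.NEVER_BUT_INCLUDE_GRADE constant that the API code below checks. The xmodule.graders change itself is not part of this excerpt, so the following is only a hedged sketch of how the new constant presumably sits alongside the existing modes (the ALWAYS/PAST_DUE/NEVER values exist today; the last line is assumed from the template value above):

class ShowCorrectness:
    # Existing modes in xmodule.graders.
    ALWAYS = 'always'        # correctness always visible to learners
    PAST_DUE = 'past_due'    # correctness visible once the due date passes
    NEVER = 'never'          # correctness never visible to learners
    # New mode added by this PR (value taken from the template above): correctness stays
    # hidden, but the overall subsection score is surfaced after the due date.
    NEVER_BUT_INCLUDE_GRADE = 'never_but_include_grade'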
212 changes: 212 additions & 0 deletions lms/djangoapps/course_home_api/progress/api.py
Expand Up @@ -2,14 +2,226 @@
Python APIs exposed for the progress tracking functionality of the course home API.
"""

from __future__ import annotations

from dataclasses import dataclass, field
from datetime import datetime, timezone

from django.contrib.auth import get_user_model
from opaque_keys.edx.keys import CourseKey

from lms.djangoapps.courseware.courses import get_course_blocks_completion_summary
from openedx.core.lib.grade_utils import round_away_from_zero
from xmodule.graders import ShowCorrectness

User = get_user_model()


@dataclass
class _AssignmentBucket:
Contributor:
Question: Do we use the terminology "bucket" elsewhere in the grading code? I don't hate the name, but I don't want to add a convention if we already have something else we call this, say on the frontend.

Contributor Author:
On the frontend, the calculation logic primarily resides in these two functions:
https://github.com/openedx/frontend-app-learning/pull/1797/files#diff-4d3a6d853ff9fd1354a56024f06c6718c856c281e295de92c5b09ac1e86774bfL6-L92

Initially, I tried to replicate the exact calculation flow from the frontend, but it became difficult to follow due to the complexity of the logic. To improve readability and maintainability, I introduced the _AssignmentBucket class to make the code more modular.

We can definitely consider renaming it if "bucket" doesn't feel intuitive; for example, something like AssignmentDetails might be clearer?

Contributor:
It's fine, we can keep the terminology for now. Thank you.

"""Holds scores and visibility info for one assignment type.

Attributes:
assignment_type: Full assignment type name from the grading policy (for example, "Homework").
num_total: The total number of assignments expected to contribute to the grade before any
drop-lowest rules are applied.
last_grade_publish_date: The latest due date among date-gated subsections of this assignment
type, i.e. the date after which all grades of the type are released and count toward results;
defaults to the current time when no such due date exists.
scores: Per-subsection fractional scores (each value is ``earned / possible`` and falls in
the range 0–1). While awaiting published content we pad the list with zero placeholders
so that its length always matches ``num_total`` until real scores replace them.
visibilities: Mirrors ``scores`` index-for-index and records whether each subsection's
correctness feedback is visible to the learner (``True``), hidden (``False``), or not
yet populated (``None`` when the entry is a placeholder).
included: Tracks whether each subsection currently counts toward the learner's grade as
determined by ``SubsectionGrade.show_grades``. Values follow the same convention as
``visibilities`` (``True`` / ``False`` / ``None`` placeholders).
assignments_created: Count of real subsections inserted into the bucket so far. Once this
reaches ``num_total``, all placeholder entries have been replaced with actual data.
"""
assignment_type: str
num_total: int
last_grade_publish_date: datetime
scores: list[float] = field(default_factory=list)
visibilities: list[bool | None] = field(default_factory=list)
included: list[bool | None] = field(default_factory=list)
assignments_created: int = 0

@classmethod
def with_placeholders(cls, assignment_type: str, num_total: int, now: datetime):
"""Create a bucket prefilled with placeholder (empty) entries."""
return cls(
assignment_type=assignment_type,
num_total=num_total,
last_grade_publish_date=now,
scores=[0] * num_total,
visibilities=[None] * num_total,
included=[None] * num_total,
)

def add_subsection(self, score: float, is_visible: bool, is_included: bool):
"""Add a subsection’s score and visibility, replacing a placeholder if space remains."""
if self.assignments_created < self.num_total:
if self.scores:
self.scores.pop(0)
if self.visibilities:
self.visibilities.pop(0)
if self.included:
self.included.pop(0)
self.scores.append(score)
self.visibilities.append(is_visible)
self.included.append(is_included)
self.assignments_created += 1

def drop_lowest(self, num_droppable: int):
"""Remove the lowest scoring subsections, up to the provided num_droppable."""
while num_droppable > 0 and self.scores:
idx = self.scores.index(min(self.scores))
self.scores.pop(idx)
self.visibilities.pop(idx)
self.included.pop(idx)
num_droppable -= 1

def hidden_state(self) -> str:
"""Return whether kept scores are all, some, or none hidden."""
if not self.visibilities:
return 'none'
all_hidden = all(v is False for v in self.visibilities)
some_hidden = any(v is False for v in self.visibilities)
if all_hidden:
return 'all'
if some_hidden:
return 'some'
return 'none'
Comment on lines +86 to +94
Contributor:
Can we create constants for all these states?

def averages(self) -> tuple[float, float]:
"""Compute visible and included averages over kept scores.

The visible average sums only scores whose visibility flag is True and divides by the total
number of kept scores (mirroring legacy behavior). The included average does the same for
scores marked included (show_grades True), using the same denominator.

Returns:
(earned_visible, earned_all) tuple of floats (0-1 each).
"""
if not self.scores:
return 0.0, 0.0
visible_scores = [s for i, s in enumerate(self.scores) if self.visibilities[i]]
included_scores = [s for i, s in enumerate(self.scores) if self.included[i]]
earned_visible = sum(visible_scores) / len(self.scores)
earned_all = sum(included_scores) / len(self.scores)
return earned_visible, earned_all
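
To make the placeholder and drop-lowest flow above concrete, here is a minimal sketch of how the bucket is meant to be exercised; it assumes nothing beyond the dataclass defined above, and the values are illustrative:

from datetime import datetime, timezone  # already imported at module level

bucket = _AssignmentBucket.with_placeholders('Homework', num_total=3, now=datetime.now(timezone.utc))
# Only two of the three expected homeworks have real grades; the third stays a zero placeholder.
bucket.add_subsection(score=1.0, is_visible=True, is_included=True)
bucket.add_subsection(score=0.4, is_visible=False, is_included=True)

bucket.drop_lowest(1)         # removes the remaining 0.0 placeholder, keeping [1.0, 0.4]
print(bucket.hidden_state())  # 'some' -- one kept score is hidden from the learner
print(bucket.averages())      # (0.5, 0.7): visible average vs. included average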


class _AssignmentTypeGradeAggregator:
"""Collects and aggregates subsection grades by assignment type."""

def __init__(self, course_grade, grading_policy: dict, has_staff_access: bool):
"""Initialize with course grades, grading policy, and staff access flag."""
self.course_grade = course_grade
self.grading_policy = grading_policy
self.has_staff_access = has_staff_access
self.now = datetime.now(timezone.utc)
self.policy_map = self._build_policy_map()
self.buckets: dict[str, _AssignmentBucket] = {}

def _build_policy_map(self) -> dict:
"""Convert grading policy into a lookup of assignment type → policy info."""
policy_map = {}
for policy in self.grading_policy.get('GRADER', []):
policy_map[policy.get('type')] = {
'weight': policy.get('weight', 0.0),
'short_label': policy.get('short_label', ''),
'num_droppable': policy.get('drop_count', 0),
'num_total': policy.get('min_count', 0),
}
return policy_map
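# Illustration (hypothetical values): a GRADER entry such as
#     {'type': 'Homework', 'weight': 0.15, 'short_label': 'HW', 'drop_count': 2, 'min_count': 12}
# is normalized here to
#     policy_map['Homework'] == {'weight': 0.15, 'short_label': 'HW', 'num_droppable': 2, 'num_total': 12}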

def _bucket_for(self, assignment_type: str) -> _AssignmentBucket:
"""Get or create a score bucket for the given assignment type."""
bucket = self.buckets.get(assignment_type)
if bucket is None:
num_total = self.policy_map.get(assignment_type, {}).get('num_total', 0) or 0
bucket = _AssignmentBucket.with_placeholders(assignment_type, num_total, self.now)
self.buckets[assignment_type] = bucket
return bucket

def collect(self):
"""Gather subsection grades into their respective assignment buckets."""
for chapter in self.course_grade.chapter_grades.values():
for subsection_grade in chapter.get('sections', []):
if not getattr(subsection_grade, 'graded', False):
continue
assignment_type = getattr(subsection_grade, 'format', '') or ''
if not assignment_type:
continue
graded_total = getattr(subsection_grade, 'graded_total', None)
earned = getattr(graded_total, 'earned', 0.0) if graded_total else 0.0
possible = getattr(graded_total, 'possible', 0.0) if graded_total else 0.0
earned = 0.0 if earned is None else earned
possible = 0.0 if possible is None else possible
score = (earned / possible) if possible else 0.0
is_visible = ShowCorrectness.correctness_available(
subsection_grade.show_correctness, subsection_grade.due, self.has_staff_access
)
is_included = subsection_grade.show_grades(self.has_staff_access)
bucket = self._bucket_for(assignment_type)
bucket.add_subsection(score, is_visible, is_included)
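# Track the latest due date among date-gated visibility modes so the UI can report when
# every grade of this assignment type becomes published.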
visibilities_with_due_dates = [ShowCorrectness.PAST_DUE, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE]
if subsection_grade.show_correctness in visibilities_with_due_dates:
if subsection_grade.due and subsection_grade.due > bucket.last_grade_publish_date:
bucket.last_grade_publish_date = subsection_grade.due

def build_results(self) -> dict:
"""Apply drops, compute averages, and return aggregated results and total grade."""
final_grades = 0.0
rows = []
for assignment_type, bucket in self.buckets.items():
policy = self.policy_map.get(assignment_type, {})
bucket.drop_lowest(policy.get('num_droppable', 0))
earned_visible, earned_all = bucket.averages()
weight = policy.get('weight', 0.0)
short_label = policy.get('short_label', '')
row = {
'type': assignment_type,
'weight': weight,
'average_grade': round_away_from_zero(earned_visible, 4),
'weighted_grade': round_away_from_zero(earned_visible * weight, 4),
'short_label': short_label,
'num_droppable': policy.get('num_droppable', 0),
'last_grade_publish_date': bucket.last_grade_publish_date,
'has_hidden_contribution': bucket.hidden_state(),
}
final_grades += earned_all * weight
rows.append(row)
rows.sort(key=lambda r: r['weight'])
return {'results': rows, 'final_grades': round_away_from_zero(final_grades, 4)}

def run(self) -> dict:
"""Execute full pipeline (collect + aggregate) returning final payload."""
self.collect()
return self.build_results()


def aggregate_assignment_type_grade_summary(
course_grade,
grading_policy: dict,
has_staff_access: bool = False,
) -> dict:
"""
Aggregate subsection grades by assignment type and return summary data.
Args:
course_grade: CourseGrade object containing chapter and subsection grades.
grading_policy: Dictionary representing the course's grading policy.
has_staff_access: Boolean indicating if the user has staff access to view all grades.
Returns:
Dictionary with keys:
results: list of per-assignment-type summary dicts
final_grades: overall weighted contribution (float, 4 decimal rounding)
"""
aggregator = _AssignmentTypeGradeAggregator(course_grade, grading_policy, has_staff_access)
return aggregator.run()
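# Rough sketch of the returned shape (hypothetical values, not taken from the source):
#
# aggregate_assignment_type_grade_summary(course_grade, grading_policy)  ->
# {
#     'results': [
#         {
#             'type': 'Homework', 'short_label': 'HW', 'weight': 0.4,
#             'average_grade': 0.75, 'weighted_grade': 0.3,
#             'num_droppable': 1, 'has_hidden_contribution': 'none',
#             'last_grade_publish_date': datetime(2025, 1, 15, tzinfo=timezone.utc),
#         },
#     ],
#     'final_grades': 0.3,
# }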


def calculate_progress_for_learner_in_course(course_key: CourseKey, user: User) -> dict:
"""
Calculate a given learner's progress in the specified course run.
Expand Down
17 changes: 17 additions & 0 deletions lms/djangoapps/course_home_api/progress/serializers.py
Expand Up @@ -26,6 +26,7 @@ class SubsectionScoresSerializer(ReadOnlySerializer):
assignment_type = serializers.CharField(source='format')
block_key = serializers.SerializerMethodField()
display_name = serializers.CharField()
due = serializers.DateTimeField(allow_null=True)
has_graded_assignment = serializers.BooleanField(source='graded')
override = serializers.SerializerMethodField()
learner_has_access = serializers.SerializerMethodField()
Expand Down Expand Up @@ -127,6 +128,20 @@ class VerificationDataSerializer(ReadOnlySerializer):
status_date = serializers.DateTimeField()


class AssignmentTypeScoresSerializer(ReadOnlySerializer):
"""
Serializer for aggregated scores per assignment type.
"""
type = serializers.CharField()
weight = serializers.FloatField()
average_grade = serializers.FloatField()
weighted_grade = serializers.FloatField()
last_grade_publish_date = serializers.DateTimeField()
has_hidden_contribution = serializers.CharField()
short_label = serializers.CharField()
num_droppable = serializers.IntegerField()


class ProgressTabSerializer(VerifiedModeSerializer):
"""
Serializer for progress tab
Expand All @@ -146,3 +161,5 @@ class ProgressTabSerializer(VerifiedModeSerializer):
user_has_passing_grade = serializers.BooleanField()
verification_data = VerificationDataSerializer()
disable_progress_graph = serializers.BooleanField()
assignment_type_grade_summary = AssignmentTypeScoresSerializer(many=True)
final_grades = serializers.FloatField()
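
With these fields in place, the view code would presumably pass the aggregator output straight through to the progress tab context, along the lines of the following sketch (not the actual view code):

summary = aggregate_assignment_type_grade_summary(course_grade, grading_policy, has_staff_access)
context = {
    # ...existing progress tab context...
    'assignment_type_grade_summary': summary['results'],
    'final_grades': summary['final_grades'],
}
# ProgressTabSerializer(context).data would then include the two new keys, serialized by
# AssignmentTypeScoresSerializer(many=True) and FloatField respectively.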
109 changes: 108 additions & 1 deletion lms/djangoapps/course_home_api/progress/tests/test_api.py
Expand Up @@ -6,7 +6,80 @@

from django.test import TestCase

from lms.djangoapps.course_home_api.progress.api import calculate_progress_for_learner_in_course
from lms.djangoapps.course_home_api.progress.api import (
calculate_progress_for_learner_in_course,
aggregate_assignment_type_grade_summary,
)
from xmodule.graders import ShowCorrectness
from datetime import datetime, timedelta, timezone
from types import SimpleNamespace


def _make_subsection(fmt, earned, possible, show_corr, *, due_delta_days=None):
"""Build a lightweight subsection object for testing aggregation scenarios."""
graded_total = SimpleNamespace(earned=earned, possible=possible)
due = None
if due_delta_days is not None:
due = datetime.now(timezone.utc) + timedelta(days=due_delta_days)
return SimpleNamespace(
graded=True,
format=fmt,
graded_total=graded_total,
show_correctness=show_corr,
due=due,
show_grades=lambda staff: True,
)


_AGGREGATION_SCENARIOS = [
(
'all_visible_always',
{'type': 'Homework', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'HW'},
[
_make_subsection('Homework', 1, 1, ShowCorrectness.ALWAYS),
_make_subsection('Homework', 0.5, 1, ShowCorrectness.ALWAYS),
],
{'avg': 0.75, 'weighted': 0.75, 'hidden': 'none', 'final': 0.75},
),
(
'some_hidden_never_but_include',
{'type': 'Exam', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'EX'},
[
_make_subsection('Exam', 1, 1, ShowCorrectness.ALWAYS),
_make_subsection('Exam', 0.5, 1, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE),
],
{'avg': 0.5, 'weighted': 0.5, 'hidden': 'some', 'final': 0.75},
),
(
'all_hidden_never_but_include',
{'type': 'Quiz', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'QZ'},
[
_make_subsection('Quiz', 0.4, 1, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE),
_make_subsection('Quiz', 0.6, 1, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE),
],
{'avg': 0.0, 'weighted': 0.0, 'hidden': 'all', 'final': 0.5},
),
(
'past_due_mixed_visibility',
{'type': 'Lab', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'LB'},
[
_make_subsection('Lab', 0.8, 1, ShowCorrectness.PAST_DUE, due_delta_days=-1),
_make_subsection('Lab', 0.2, 1, ShowCorrectness.PAST_DUE, due_delta_days=+3),
],
{'avg': 0.4, 'weighted': 0.4, 'hidden': 'some', 'final': 0.5},
),
(
'drop_lowest_keeps_high_scores',
{'type': 'Project', 'weight': 1.0, 'drop_count': 2, 'min_count': 4, 'short_label': 'PR'},
[
_make_subsection('Project', 1, 1, ShowCorrectness.ALWAYS),
_make_subsection('Project', 1, 1, ShowCorrectness.ALWAYS),
_make_subsection('Project', 0, 1, ShowCorrectness.ALWAYS),
_make_subsection('Project', 0, 1, ShowCorrectness.ALWAYS),
],
{'avg': 1.0, 'weighted': 1.0, 'hidden': 'none', 'final': 1.0},
),
]


class ProgressApiTests(TestCase):
Expand Down Expand Up @@ -73,3 +146,37 @@ def test_calculate_progress_for_learner_in_course_summary_empty(self, mock_get_s

results = calculate_progress_for_learner_in_course("some_course", "some_user")
assert not results

def test_aggregate_assignment_type_grade_summary_scenarios(self):
"""
Verify aggregate_assignment_type_grade_summary across the scenario table above:
1. Visibility modes (always, never-but-include-grade, past due)
2. Drop-lowest behavior
3. Weighting behavior
4. Final grade calculation
5. Average grade calculation
6. Weighted grade calculation
7. has_hidden_contribution calculation
"""

for case_name, policy, subsections, expected in _AGGREGATION_SCENARIOS:
with self.subTest(case_name=case_name):
course_grade = SimpleNamespace(chapter_grades={'chapter': {'sections': subsections}})
grading_policy = {'GRADER': [policy]}

result = aggregate_assignment_type_grade_summary(
course_grade,
grading_policy,
has_staff_access=False,
)

assert 'results' in result and 'final_grades' in result
assert result['final_grades'] == expected['final']
assert len(result['results']) == 1

row = result['results'][0]
assert row['type'] == policy['type'], case_name
assert row['average_grade'] == expected['avg']
assert row['weighted_grade'] == expected['weighted']
assert row['has_hidden_contribution'] == expected['hidden']
assert row['num_droppable'] == policy['drop_count']
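
These scenario tests are plain TestCase tests with no modulestore dependencies, so (assuming a working edx-platform development environment) they can typically be run in isolation with something like:

pytest lms/djangoapps/course_home_api/progress/tests/test_api.py -k aggregate_assignment_type_grade_summary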