Added max score to output of get_student_grade_summary_data.
This will put the actual score and actual max score in scope for
the return_csv function, so actual scores can be dumped.

The ultimate goal is to provide this data in the CSV dump that is
passed to Stellar via pylmod.

This is PR openedx#10395 on edX, and issue 95 on mitocw's edx fork.

https://github.com/edx/edx-platform/pull/10395

#95
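As a rough illustration of that goal, here is a minimal sketch of how per-assignment (earned, possible) pairs could be flattened into a CSV dump of the kind described above. The dump_raw_scores_csv helper and its column layout are assumptions made for this sketch; they are not the return_csv implementation in edx-platform or the pylmod API.

# Hypothetical sketch only: flatten per-assignment (earned, possible) pairs
# into CSV columns. Neither the function name nor the column layout comes
# from edx-platform or pylmod.
import csv
import io


def dump_raw_scores_csv(assignments, rows):
    """Write one CSV row per student, expanding each (earned, possible)
    tuple into two adjacent columns."""
    output = io.StringIO()
    writer = csv.writer(output)
    header = ["ID", "Username"]
    for name in assignments:
        header.extend([name, name + " (max)"])
    writer.writerow(header)
    for student_id, username, scores in rows:
        row = [student_id, username]
        for earned, possible in scores:
            row.extend([earned, possible])
        writer.writerow(row)
    return output.getvalue()


print(dump_raw_scores_csv(
    ["p1", "p2", "p3"],
    [(2, "u2", [(0.0, 1), (1.0, 1.0), (0.0, 1)])],
))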
ShawnMilo authored and amir-qayyum-khan committed Feb 19, 2016
1 parent 29fbe72 commit ddc5f45
Showing 3 changed files with 233 additions and 3 deletions.
32 changes: 32 additions & 0 deletions AUTHORS
@@ -225,4 +225,36 @@ Alessandro Verdura <finalmente2@tin.it>
Sven Marnach <sven@marnach.net>
Richard Moch <richard.moch@gmail.com>
Albert Liang <albertliangcode@gmail.com>
<<<<<<< HEAD

=======
Pan Luo <pan.luo@ubc.ca>
Tyler Nickerson <nickersoft@gmail.com>
Vedran Karačić <vedran@edx.org>
William Ono <william.ono@ubc.ca>
Dongwook Yoon <dy252@cornell.edu>
Awais Qureshi <awais.qureshi@arbisoft.com>
Eric Fischer <efischer@edx.org>
Brian Beggs <macdiesel@gmail.com>
Bill DeRusha <bill@edx.org>
Kevin Falcone <kevin@edx.org>
Mirjam Škarica <mirjamskarica@gmail.com>
Saleem Latif <saleem@edx.org>
Julien Paillé <julien.paille@openfun.fr>
Michael Frey <mfrey@edx.org>
Hasnain Naveed <hasnain@edx.org>
J. Cliff Dyer <cdyer@edx.org>
Jamie Folsom <jfolsom@mit.edu>
George Schneeloch <gschneel@mit.edu>
Dustin Gadal <Dustin.Gadal@gmail.com>
Ibrahim Ahmed <ibrahimahmed443@gmail.com>
Robert Raposa <rraposa@edx.org>
Giovanni Di Milia <gdimilia@mit.edu>
Peter Wilkins <pwilkins@mit.edu>
Justin Abrahms <abrahms@mit.edu>
Arbab Nazar <arbab@edx.org>
Douglas Hall <dhall@edx.org>
Awais Jibran <awaisdar001@gmail.com>
Muhammad Rehan <muhammadrehan69@gmail.com>
Shawn Milochik <shawn@milochik.com>
>>>>>>> 5d88300... Added max score to output of get_student_grade_summary_data.
180 changes: 180 additions & 0 deletions lms/djangoapps/instructor/tests/test_legacy_raw_download_csv.py
@@ -67,9 +67,29 @@ def test_download_raw_grades_dump(self):
'''
self.assertEqual(body, expected_csv, msg)

<<<<<<< HEAD
def test_grade_summary_data(self):
"""
Test grade summary data report generation
=======
def get_expected_grade_data(
self, get_grades=True, get_raw_scores=False,
use_offline=False, get_score_max=False):
"""
Return expected results from the get_student_grade_summary_data call
with any options selected.

Note that the kwargs accepted by get_expected_grade_data (and their
default values) must be identical to those in
get_student_grade_summary_data for this function to be accurate.
If kwargs are added or removed, or the functionality triggered by
them changes, this function must be updated to match.

If get_score_max is True, instead of a single score between 0 and 1,
the actual score and total possible are returned. For example, if the
student got one out of two possible points, the values (1, 2) will be
returned instead of 0.5.
>>>>>>> 5d88300... Added max score to output of get_student_grade_summary_data.
"""
self.answer_question()
@@ -104,6 +124,166 @@ def test_grade_summary_data(self):
]
}
<<<<<<< HEAD
=======
# The first five columns contain the student ID, username,
# full name, and the edX and external e-mail addresses.
non_grade_columns = 5
# If the following 'if' is triggered, the
# get_student_grade_summary_data function will not return any
# grade data, only the "non_grade_columns". So strip out the
# headers beyond the "non_grade_columns" and strip out all the
# grades in the 'data' key.
if not get_grades or use_offline:
expected_data["header"] = expected_data["header"][:non_grade_columns]
# This iterates over the lists of grades in the 'data' key
# of the returned dictionary and strips out everything after
# the non_grade_columns.
for index, rec in enumerate(expected_data["data"]):
expected_data["data"][index] = rec[:non_grade_columns]
# Wipe out all data in the 'assignments' key if use_offline
# is True; no assignment data is returned.
if use_offline:
expected_data['assignments'] = []
# If get_grades is False, get_student_grade_summary_data doesn't
# even return an 'assignments' key, so delete it.
if get_grades is False:
del expected_data['assignments']
# If get_raw_scores is True, get_student_grade_summary_data returns
# the raw score per assignment. For example, the "0.3333333333333333"
# in the data above is for getting one out of three possible
# answers correct. Getting raw scores means the actual score (1) is
# returned instead of 1.0/3.0.
# For some reason, this also causes it not to return any assignments
# without attempts, so most of the headers are removed.
elif get_raw_scores:
expected_data["data"] = [
[
1, u'u1', u'username', u'view@test.com',
'', None, None, None
],
[
2, u'u2', u'username', u'view2@test.com',
'', 0.0, 1.0, 0.0
],
]
expected_data["assignments"] = [u'p3', u'p2', u'p1']
expected_data["header"] = [
u'ID', u'Username', u'Full Name', u'edX email',
u'External email', u'p3', u'p2', u'p1'
]
# Strip out the single-value float scores and replace them
# with two-tuples of actual and possible scores (see docstring).
if get_score_max:
expected_data["data"][-1][-3:] = (0.0, 1), (1.0, 1.0), (0.0, 1)
return expected_data
def test_grade_summary_data_defaults(self):
"""
Test grade summary data report generation with all default kwargs.

This test compares the output of get_student_grade_summary_data
with a dictionary of expected values. The purpose of this test is
to ensure that future changes to the get_student_grade_summary_data
function (for example, mitocw/edx-platform #95) do not break its
default behavior.
"""
request = DummyRequest()
self.answer_question()
data = get_student_grade_summary_data(request, self.course)
expected_data = self.get_expected_grade_data()
self.compare_data(data, expected_data)
def test_grade_summary_data_raw_scores(self):
"""
Test grade summary data report generation with get_raw_scores True.
"""
request = DummyRequest()
self.answer_question()
data = get_student_grade_summary_data(
request, self.course, get_raw_scores=True,
)
expected_data = self.get_expected_grade_data(get_raw_scores=True)
self.compare_data(data, expected_data)
def test_grade_summary_data_no_grades(self):
"""
Test grade summary data report generation with
get_grades set to False.
"""
request = DummyRequest()
self.answer_question()
data = get_student_grade_summary_data(
request, self.course, get_grades=False
)
expected_data = self.get_expected_grade_data(get_grades=False)
# if get_grades is False, get_expected_grade_data does not
# add an "assignments" key.
self.assertNotIn("assignments", expected_data)
self.compare_data(data, expected_data)
def test_grade_summary_data_use_offline(self):
"""
Test grade summary data report generation with use_offline True.
"""
request = DummyRequest()
self.answer_question()
data = get_student_grade_summary_data(
request, self.course, use_offline=True)
expected_data = self.get_expected_grade_data(use_offline=True)
self.compare_data(data, expected_data)
def test_grade_summary_data_use_offline_and_raw_scores(self):
"""
Test grade summary data report generation with use_offline
and get_raw_scores both True.
"""
request = DummyRequest()
self.answer_question()
data = get_student_grade_summary_data(
request, self.course, use_offline=True, get_raw_scores=True
)
expected_data = self.get_expected_grade_data(
use_offline=True, get_raw_scores=True
)
self.compare_data(data, expected_data)
def test_grade_summary_data_get_score_max(self):
"""
Test grade summary data report generation with get_score_max set
to True (also requires get_raw_scores to be True).
"""
request = DummyRequest()
self.answer_question()
data = get_student_grade_summary_data(
request, self.course, use_offline=True, get_raw_scores=True,
get_score_max=True,
)
expected_data = self.get_expected_grade_data(
use_offline=True, get_raw_scores=True, get_score_max=True,
)
self.compare_data(data, expected_data)
def compare_data(self, data, expected_data):
"""
Compare the output of the get_student_grade_summary_data
function to the expected_data dictionary.
"""

# Currently, all kwargs to get_student_grade_summary_data
# return a dictionary with the same keys, except for
# get_grades=False, which results in no 'assignments' key.
# This is explicitly checked for above in
# test_grade_summary_data_no_grades. This is a backup which
# will catch future changes.
self.assertListEqual(
expected_data.keys(),
data.keys(),
)

# Ensure the student info and assignment names are as expected.
>>>>>>> 5d88300... Added max score to output of get_student_grade_summary_data.
for key in ['assignments', 'header']:
self.assertListEqual(expected_data[key], data[key])

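As a quick illustration of what the tests above assert, here is a standalone sketch. The two dictionaries are made-up stand-ins for what get_student_grade_summary_data might return with get_raw_scores=True, without and with get_score_max; they mirror the expected values in get_expected_grade_data rather than a captured run.

# Illustrative stand-ins only; values mirror get_expected_grade_data above.
raw_scores = {
    "header": [
        "ID", "Username", "Full Name", "edX email", "External email",
        "p3", "p2", "p1",
    ],
    "assignments": ["p3", "p2", "p1"],
    "data": [[2, "u2", "username", "view2@test.com", "", 0.0, 1.0, 0.0]],
}

with_score_max = {
    "header": raw_scores["header"],
    "assignments": raw_scores["assignments"],
    "data": [[2, "u2", "username", "view2@test.com", "",
              (0.0, 1), (1.0, 1.0), (0.0, 1)]],
}

# Same keys and same non-grade columns; only the grade cells change from
# single floats to (earned, possible) tuples when get_score_max is used.
assert raw_scores.keys() == with_score_max.keys()
assert raw_scores["data"][0][:5] == with_score_max["data"][0][:5]
assert all(isinstance(cell, tuple) for cell in with_score_max["data"][0][5:])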
24 changes: 21 additions & 3 deletions lms/djangoapps/instructor/views/legacy.py
@@ -674,17 +674,21 @@ def __init__(self):
self.grades = {}
self._current_row = {}

def _add_grade_to_row(self, component, score):
def _add_grade_to_row(self, component, score, possible=None):
"""Creates component if needed, and assigns score
Args:
component (str): Course component being graded
score (float): Score of student on component
possible (float): Max possible score for the component
Returns:
None
"""
component_index = self.components.setdefault(component, len(self.components))
if possible is not None:
# send a tuple instead of a single value
score = (score, possible)
self._current_row[component_index] = score

@contextmanager
@@ -743,6 +747,11 @@ def get_student_grade_summary_data(
data = list (one per student) of lists of data corresponding to the fields
If get_raw_scores=True, then instead of grade summaries, the raw grades for all graded modules are returned.
If get_score_max is True, two values will be returned for each grade -- the
total number of points earned and the total number of points possible. For
example, if two points are possible and one is earned, (1, 2) will be
returned instead of 0.5 (the default).
"""
course_key = course.id
enrolled_students = User.objects.filter(
@@ -769,9 +778,18 @@
log.debug(u'student=%s, gradeset=%s', student, gradeset)
with gtab.add_row(student.id) as add_grade:
if get_raw_scores:
# TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned']
# The following code calls add_grade, the callable yielded by
# the add_row context manager on the GradeTable class. This
# records a grade for each assignment. Depending on whether
# get_score_max is True, each grade is stored either as a
# single score value or as a two-tuple containing the earned
# score and possible score for the assignment (see docstring).
for score in gradeset['raw_scores']:
add_grade(score.section, getattr(score, 'earned', score[0]))
if get_score_max is True:
add_grade(score.section, score.earned, score.possible)
else:
add_grade(score.section, score.earned)
else:
category_cnts = Counter()
progress_summary = grades._progress_summary(student, request, course)
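To isolate the core idea of the _add_grade_to_row change, here is a minimal, self-contained sketch. MiniGradeTable is an illustrative stand-in, not the GradeTable class above: the same cell holds a bare score by default, or an (earned, possible) tuple when a max is supplied.

# Simplified stand-in for the pattern used by GradeTable._add_grade_to_row.
class MiniGradeTable(object):
    def __init__(self):
        self.components = {}   # component name -> column index
        self.rows = []

    def add_row(self):
        """Start a new student row and return a callable that adds grades."""
        row = {}
        self.rows.append(row)
        return lambda component, score, possible=None: self._add(
            row, component, score, possible)

    def _add(self, row, component, score, possible=None):
        index = self.components.setdefault(component, len(self.components))
        if possible is not None:
            # Keep earned and possible together, as the new code path does.
            score = (score, possible)
        row[index] = score


table = MiniGradeTable()
add_grade = table.add_row()
add_grade("p1", 0.5)          # default: single value
add_grade("p2", 1.0, 2.0)     # get_score_max style: (earned, possible)
print(table.rows)             # [{0: 0.5, 1: (1.0, 2.0)}]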
