Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add grading method support #299

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified conf/locale/es_419/LC_MESSAGES/django.mo
Binary file not shown.
44 changes: 44 additions & 0 deletions conf/locale/es_419/LC_MESSAGES/django.po
Original file line number Diff line number Diff line change
Expand Up @@ -10344,6 +10344,46 @@ msgstr ""
"Defina el número de veces que un estudiante puede intentar responder este "
"problema. Si el valor no es definido, se permitirán intentos infinitos."

#: xmodule/capa_block.py
msgid "Grading Method"
msgstr "Método de calificación"

#: xmodule/capa_block.py
msgid ""
"Define the grading method for this problem. By default, it's the score of"
" the last submission made by the student."
msgstr ""
"Defina el método de calificación para este problema. Por defecto, es el "
"puntaje del último envío realizado por el estudiante."

#: xmodule/capa_block.py
msgid "Last Score"
msgstr "Último puntaje"

#: xmodule/capa_block.py
msgid "First Score"
msgstr "Primer puntaje"

#: xmodule/capa_block.py
msgid "Highest Score"
msgstr "Puntaje más alto"

#: xmodule/capa_block.py
msgid "Average Score"
msgstr "Puntaje promedio"

#: xmodule/capa_block.py
msgid "List of correctness maps for each attempt"
msgstr "Lista de mapas de corrección para cada intento"

#: xmodule/capa_block.py
msgid "List of student answers for each attempt"
msgstr "Lista de respuestas de los estudiantes para cada intento"

#: xmodule/capa_block.py
msgid "List of scores for each attempt"
msgstr "Lista de puntajes para cada intento"

#: xmodule/capa_block.py
msgid "Date that this problem is due by"
msgstr "Fecha en que se debe entregar esta actividad"
Expand Down Expand Up @@ -15102,6 +15142,10 @@ msgstr[0] "Ha realizado {num_used} de {num_total} intento"
msgstr[1] "Ha realizado {num_used} de {num_total} intentos"
msgstr[2] "Ha realizado {num_used} de {num_total} intentos"

#: lms/templates/problem.html
msgid "Grading method: {grading_method}"
msgstr "Método de calificación: {grading_method}"

#: lms/templates/problem.html
msgid ""
"Some problems have options such as save, reset, hints, or show answer. These"
Expand Down
3 changes: 3 additions & 0 deletions lms/djangoapps/instructor/enrollment.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,9 @@ def _reset_module_attempts(studentmodule):
problem_state = json.loads(studentmodule.state)
# old_number_of_attempts = problem_state["attempts"]
problem_state["attempts"] = 0
problem_state["score_history"] = []
problem_state["correct_map_history"] = []
problem_state["student_answers_history"] = []

# save
studentmodule.state = json.dumps(problem_state)
Expand Down
5 changes: 4 additions & 1 deletion lms/djangoapps/instructor/tests/test_enrollment.py
Original file line number Diff line number Diff line change
Expand Up @@ -543,7 +543,10 @@ def setup_team(self):
'attempts': 1,
'saved_files_descriptions': ['summary', 'proposal', 'diagrams'],
'saved_files_sizes': [1364677, 958418],
'saved_files_names': ['case_study_abstract.txt', 'design_prop.pdf', 'diagram1.png']
'saved_files_names': ['case_study_abstract.txt', 'design_prop.pdf', 'diagram1.png'],
'score_history': [],
'correct_map_history': [],
'student_answers_history': [],
}
team_state = json.dumps(self.team_state_dict)

Expand Down
4 changes: 2 additions & 2 deletions lms/djangoapps/instructor_task/tests/test_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,7 @@ def test_rescoring_failure(self):
self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])

expected_message = "bad things happened"
with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_answers') as mock_rescore:
mock_rescore.side_effect = ZeroDivisionError(expected_message)
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)
self._assert_task_failure(
Expand All @@ -293,7 +293,7 @@ def test_rescoring_bad_unicode_input(self):

# return an input error as if it were a numerical response, with an embedded unicode character:
expected_message = "Could not interpret '2/3\u03a9' as a number"
with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore:
with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_answers') as mock_rescore:
mock_rescore.side_effect = StudentInputError(expected_message)
instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)

Expand Down
3 changes: 2 additions & 1 deletion lms/templates/problem.html
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
<%page expression_filter="h"/>
<%!
from django.utils.translation import ngettext, gettext as _
from openedx.core.djangolib.markup import HTML
from openedx.core.djangolib.markup import HTML, Text
%>

<%namespace name='static' file='static_content.html'/>
Expand Down Expand Up @@ -90,6 +90,7 @@ <h3 class="hd hd-3 problem-header" id="${ short_id }-problem-title" aria-describ
% if attempts_allowed and (not submit_disabled_cta or attempts_used == 0):
${ngettext("You have used {num_used} of {num_total} attempt", "You have used {num_used} of {num_total} attempts", attempts_allowed).format(num_used=attempts_used, num_total=attempts_allowed)}
% endif
<div>${Text(_("Grading method: {grading_method}")).format(grading_method=grading_method)}</div>
<span class="sr">${_("Some problems have options such as save, reset, hints, or show answer. These options follow the Submit button.")}</span>
</div>
</div>
Expand Down
35 changes: 35 additions & 0 deletions xmodule/capa/capa_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,12 @@ def __init__(self, problem_text, id, capa_system, capa_block, # pylint: disable
self.has_saved_answers = state.get('has_saved_answers', False)
if 'correct_map' in state:
self.correct_map.set_dict(state['correct_map'])
self.correct_map_history = []
for cmap in state.get('correct_map_history', []):
correct_map = CorrectMap()
correct_map.set_dict(cmap)
self.correct_map_history.append(correct_map)

self.done = state.get('done', False)
self.input_state = state.get('input_state', {})

Expand Down Expand Up @@ -299,8 +305,10 @@ def do_reset(self):
Reset internal state to unfinished, with no answers
"""
self.student_answers = {}
self.student_answers_history = []
self.has_saved_answers = False
self.correct_map = CorrectMap()
self.correct_map_history = []
self.done = False

def set_initial_display(self):
Expand Down Expand Up @@ -328,6 +336,7 @@ def get_state(self):
'student_answers': self.student_answers,
'has_saved_answers': self.has_saved_answers,
'correct_map': self.correct_map.get_dict(),
'correct_map_history': [cmap.get_dict() for cmap in self.correct_map_history],
'input_state': self.input_state,
'done': self.done}

Expand Down Expand Up @@ -434,6 +443,7 @@ def grade_answers(self, answers):
self.student_answers = convert_files_to_filenames(answers)
new_cmap = self.get_grade_from_current_answers(answers)
self.correct_map = new_cmap # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.correct_map_history.append(deepcopy(new_cmap))
return self.correct_map

def supports_rescoring(self):
Expand Down Expand Up @@ -495,6 +505,31 @@ def get_grade_from_current_answers(self, student_answers):

return newcmap

def get_grade_from_answers(
    self, student_answers: dict, correct_map: dict
) -> CorrectMap:
    """
    Grade the supplied answers against the supplied correct map.

    Variant of `get_grade_from_current_answers` that grades the
    `student_answers` / `correct_map` passed in by the caller instead of
    the state stored on the problem instance. Returns a new `CorrectMap`
    aggregating the results from every responder.
    """
    graded = CorrectMap()
    # Delegate the actual grading to each responsetype instance.
    for responder in self.responders.values():
        # File objects are passed only if responsetype explicitly allows
        # for file submissions. But we have no way of knowing if
        # student_answers contains a proper answer or the filename of
        # an earlier submission, so for now skip these entirely.
        # TODO: figure out where to get file submissions when rescoring.
        if student_answers is None and 'filesubmission' in responder.allowed_inputfields:
            _ = self.capa_system.i18n.gettext
            raise Exception(_("Cannot rescore problems with possible file submissions"))

        graded.update(responder.evaluate_answers(student_answers, correct_map))

    return graded

def get_question_answers(self):
"""
Returns a dict of answer_ids to answer values. If we cannot generate
Expand Down
Loading
Loading