Assessment mode: display item feedback #93
@@ -246,7 +246,6 @@ def items_without_answers():
             "target_img_description": self.target_img_description,
             "item_background_color": self.item_background_color or None,
             "item_text_color": self.item_text_color or None,
-            "initial_feedback": self.data['feedback']['start'],
             # final feedback (data.feedback.finish) is not included - it may give away answers.
         }
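
For orientation, the feedback-related fields referenced throughout this diff look roughly like the sketch below. This is only an illustration assembled from the names the diff itself uses (data['feedback']['start'] / ['finish'] at the problem level, item['feedback']['correct'] / ['incorrect'] per item); it is not the block's full schema, and the example strings are made up.

    # Hedged illustration only: the shape of the feedback-related fields this PR touches.
    example_data = {
        "feedback": {
            "start": "Drag each item onto the image above.",   # problem-level "start" feedback
            "finish": "Good work, every item is placed.",      # withheld from items_without_answers
        },
        "items": [
            {
                "feedback": {
                    "correct": "Yes, that zone is right.",
                    "incorrect": "No, try a different zone.",
                },
            },
        ],
    }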
@@ -392,17 +391,29 @@ def do_attempt(self, data, suffix=''):
         self._validate_do_attempt()
 
         self.attempts += 1
-        self._mark_complete_and_publish_grade()  # must happen before _get_feedback
+        # pylint: disable=fixme
+        # TODO: Refactor this method to "freeze" item_state and pass it to methods that need access to it.

Review thread on the TODO above:
- Will this TODO be done in a future story/PR?
- @cahrens I've asked @bradenmacdonald about this and haven't got an answer yet. My current understanding is that it's not a good time to further increase the scope of v2.1 with optional refactorings, so it won't be a part of v2.1.

+        # These implicit dependencies between methods exist because most of them use `item_state` or other
+        # fields, either as an "input" (i.e. read value) or as output (i.e. set value) or both. As a result,
+        # incorrect order of invocation causes issues:
+        self._mark_complete_and_publish_grade()  # must happen before _get_feedback - sets grade
+        correct = self._is_answer_correct()  # must happen before manipulating item_state - reads item_state
 
-        overall_feedback_msgs, misplaced_ids = self._get_feedback()
+        overall_feedback_msgs, misplaced_ids = self._get_feedback(include_item_feedback=True)
 
+        misplaced_items = []
         for item_id in misplaced_ids:
             del self.item_state[item_id]
+            misplaced_items.append(self._get_item_definition(int(item_id)))
 
+        feedback_msgs = [FeedbackMessage(item['feedback']['incorrect'], None) for item in misplaced_items]
 
         return {
             'correct': correct,
             'attempts': self.attempts,
             'misplaced_items': list(misplaced_ids),
-            'overall_feedback': self._present_overall_feedback(overall_feedback_msgs)
+            'feedback': self._present_feedback(feedback_msgs),
+            'overall_feedback': self._present_feedback(overall_feedback_msgs)
         }
 
     @XBlock.json_handler
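
The TODO above talks about "freezing" item_state. A minimal sketch of that idea follows; the snapshot parameter on the helper calls is hypothetical (the current methods read self.item_state directly), so this only illustrates the invocation order the new comments describe, not code from this PR.

    import copy

    def do_attempt_frozen_sketch(block, data):
        # Hypothetical refactoring sketch: take one snapshot of item_state up front...
        frozen_state = copy.deepcopy(block.item_state)

        # ...and pass it to every reader, so grading and feedback cannot be affected
        # by the cleanup below, regardless of call order.
        block._mark_complete_and_publish_grade(frozen_state)   # hypothetical signature
        correct = block._is_answer_correct(frozen_state)       # hypothetical signature
        overall_msgs, misplaced_ids = block._get_feedback(frozen_state, include_item_feedback=True)

        # Mutation of the real field is now clearly the last step.
        for item_id in misplaced_ids:
            del block.item_state[item_id]

        return correct, overall_msgs, misplaced_ids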
@@ -487,7 +498,7 @@ def _validate_do_attempt(self):
                 self.i18n_service.gettext("Max number of attempts reached")
             )
 
-    def _get_feedback(self):
+    def _get_feedback(self, include_item_feedback=False):
         """
         Builds overall feedback for both standard and assessment modes
         """
@@ -510,16 +521,14 @@ def _add_msg_if_exists(ids_list, message_template, message_class):
             message = message_template(len(ids_list), self.i18n_service.ngettext)
             feedback_msgs.append(FeedbackMessage(message, message_class))
 
-        _add_msg_if_exists(
-            items.correctly_placed, FeedbackMessages.correctly_placed, FeedbackMessages.MessageClasses.CORRECTLY_PLACED
-        )
-        _add_msg_if_exists(misplaced_ids, FeedbackMessages.misplaced, FeedbackMessages.MessageClasses.MISPLACED)
-        _add_msg_if_exists(missing_ids, FeedbackMessages.not_placed, FeedbackMessages.MessageClasses.NOT_PLACED)
-
-        if misplaced_ids and self.attempts_remain:
-            feedback_msgs.append(
-                FeedbackMessage(FeedbackMessages.MISPLACED_ITEMS_RETURNED, None)
+        if self.item_state or include_item_feedback:
+            _add_msg_if_exists(
+                items.correctly_placed,
+                FeedbackMessages.correctly_placed,
+                FeedbackMessages.MessageClasses.CORRECTLY_PLACED
+            )
+            _add_msg_if_exists(misplaced_ids, FeedbackMessages.misplaced, FeedbackMessages.MessageClasses.MISPLACED)
+            _add_msg_if_exists(missing_ids, FeedbackMessages.not_placed, FeedbackMessages.MessageClasses.NOT_PLACED)
 
         if self.attempts_remain and (misplaced_ids or missing_ids):
             problem_feedback_message = self.data['feedback']['start']
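
A small illustration of the new guard (not code from the PR): the placement-category messages are now emitted only when something is on the board or when the caller explicitly asks for them, which is what do_attempt does via include_item_feedback=True, while _drop_item_standard keeps the default. The example item_state value below is invented for the boolean check only.

    def emits_placement_messages(item_state, include_item_feedback=False):
        # Illustrative stand-in for the `if self.item_state or include_item_feedback:` guard above.
        return bool(item_state) or include_item_feedback

    assert emits_placement_messages({}, include_item_feedback=True)         # do_attempt's call
    assert not emits_placement_messages({})                                 # default call, empty board
    assert emits_placement_messages({"5": {"zone": "Zone A"}})              # default call, items placed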
@@ -539,7 +548,7 @@ def _add_msg_if_exists(ids_list, message_template, message_class):
         return feedback_msgs, misplaced_ids
 
     @staticmethod
-    def _present_overall_feedback(feedback_messages):
+    def _present_feedback(feedback_messages):
         """
         Transforms feedback messages into format expected by frontend code
         """
@@ -563,14 +572,14 @@ def _drop_item_standard(self, item_attempt):
         self._publish_item_dropped_event(item_attempt, is_correct)
 
         item_feedback_key = 'correct' if is_correct else 'incorrect'
-        item_feedback = item['feedback'][item_feedback_key]
+        item_feedback = FeedbackMessage(item['feedback'][item_feedback_key], None)
         overall_feedback, __ = self._get_feedback()
 
         return {
             'correct': is_correct,
             'finished': self._is_answer_correct(),
-            'overall_feedback': self._present_overall_feedback(overall_feedback),
-            'feedback': item_feedback
+            'overall_feedback': self._present_feedback(overall_feedback),
+            'feedback': self._present_feedback([item_feedback])
         }
 
     def _drop_item_assessment(self, item_attempt):
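
With this change both the per-item feedback and the overall feedback travel through the same _present_feedback path, so the frontend receives one consistent structure. The snippet below is only an illustration of that idea: FeedbackMessage is a local stand-in and the serializer body is hypothetical; the real serialization lives in _present_feedback, which this diff renames but does not show.

    from collections import namedtuple

    # Stand-in for the block's FeedbackMessage; field names here are assumptions.
    FeedbackMessage = namedtuple("FeedbackMessage", ["message", "message_class"])

    def present_feedback_sketch(feedback_messages):
        # Hypothetical serializer: one list-of-dicts shape for both item and overall feedback.
        return [
            {"message": msg.message, "message_class": msg.message_class}
            for msg in feedback_messages
        ]

    item_msg = FeedbackMessage("No, try a different zone.", None)
    print(present_feedback_sketch([item_msg]))
    # -> [{'message': 'No, try a different zone.', 'message_class': None}]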
@@ -612,6 +621,18 @@ def _mark_complete_and_publish_grade(self):
         """
         Helper method to update `self.completed` and submit grade event if appropriate conditions met.
         """
+        # pylint: disable=fixme
+        # TODO: (arguable) split this method into "clean" functions (with no side effects and implicit state)
+        # This method implicitly depends on self.item_state (via _is_answer_correct and _get_grade)
+        # and also updates self.grade if some conditions are met. As a result this method implies some order of
+        # invocation:
+        # * it should be called after learner-caused updates to self.item_state is applied
+        # * it should be called before self.item_state cleanup is applied (i.e. returning misplaced items to item bank)
+        # * it should be called before any method that depends on self.grade (i.e. self._get_feedback)
+
+        # Splitting it into a "clean" functions will allow to capture this implicit invocation order in caller method
+        # and help avoid bugs caused by invocation order violation in future.
+
         # There's no going back from "completed" status to "incomplete"
         self.completed = self.completed or self._is_answer_correct() or not self.attempts_remain
         grade = self._get_grade()
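
The TODO above suggests splitting this into functions without side effects. A hypothetical version of that split is sketched below; the names and signatures are illustrative, not part of the PR, though the grade formula matches _get_grade further down in this diff.

    # Hypothetical split of _mark_complete_and_publish_grade into side-effect-free pieces,
    # so the caller owns the invocation order the comments above describe.
    def compute_completed(already_completed, answer_correct, attempts_remain):
        # Pure: there is no going back from "completed" to "incomplete".
        return already_completed or answer_correct or not attempts_remain

    def compute_grade(correct_count, total_count, weight):
        # Pure: same formula as _get_grade in this diff.
        return correct_count / float(total_count) * weight

    # Caller sketch: read state, compute, then apply side effects in one visible place.
    # completed = compute_completed(self.completed, self._is_answer_correct(), self.attempts_remain)
    # grade = compute_grade(*self._get_item_stats(), self.weight)
    # ...publish the grade event if the conditions are met...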
@@ -694,7 +715,7 @@ def _get_user_state(self):
             'items': item_state,
             'finished': is_finished,
             'attempts': self.attempts,
-            'overall_feedback': self._present_overall_feedback(overall_feedback_msgs)
+            'overall_feedback': self._present_feedback(overall_feedback_msgs)
         }
 
     def _get_item_state(self):
@@ -794,8 +815,8 @@ def _get_grade(self):
         """
         Returns the student's grade for this block.
         """
-        correct_count, required_count = self._get_item_stats()
-        return correct_count / float(required_count) * self.weight
+        correct_count, total_count = self._get_item_stats()
+        return correct_count / float(total_count) * self.weight
 
     def _answer_correctness(self):
         """
@@ -807,8 +828,8 @@ def _answer_correctness(self):
         * Partial: Some items are at their correct place.
         * Incorrect: None items are at their correct place.
         """
-        correct_count, required_count = self._get_item_stats()
-        if correct_count == required_count:
+        correct_count, total_count = self._get_item_stats()
+        if correct_count == total_count:
             return self.SOLUTION_CORRECT
         elif correct_count == 0:
             return self.SOLUTION_INCORRECT
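
The Partial branch is cut off by this hunk; based on the docstring above, the full classification presumably reads like the sketch below (the final return and the constant values are assumptions for illustration).

    SOLUTION_CORRECT, SOLUTION_PARTIAL, SOLUTION_INCORRECT = "correct", "partial", "incorrect"

    def answer_correctness_sketch(correct_count, total_count):
        if correct_count == total_count:
            return SOLUTION_CORRECT
        elif correct_count == 0:
            return SOLUTION_INCORRECT
        return SOLUTION_PARTIAL  # assumption: some, but not all, items are correctly placed

    assert answer_correctness_sketch(5, 5) == SOLUTION_CORRECT
    assert answer_correctness_sketch(0, 5) == SOLUTION_INCORRECT
    assert answer_correctness_sketch(2, 5) == SOLUTION_PARTIAL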

Review thread on the removal of "initial_feedback" from items_without_answers:
- @e-kolpakov I probably missed some of the more recent discussions about "Introductory Feedback" in assessment mode, but don't we at least need to pass this to the client for problems in "standard" mode?
- @itsjeyd as far as I can tell "initial_feedback" is not used anywhere in JS.
- @e-kolpakov so should "start" feedback be deleted entirely (Python, studio)?
- @cahrens no, it is still shown in the feedback area - it is passed through the get_user_state handler, rather than here.
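
To make that last point concrete: in this diff the "start" text now surfaces through _get_feedback (the `problem_feedback_message = self.data['feedback']['start']` line above), and _get_user_state returns the presented feedback to the client, so dropping "initial_feedback" from items_without_answers does not lose it. The trace below is a rough, hypothetical illustration of that path; the real conditions and return values live in the methods shown in the diff.

    # Hypothetical trace only; real logic lives in _get_feedback / _get_user_state above.
    def start_feedback_for_client(block):
        msgs = []
        if block.attempts_remain:                      # simplified stand-in for the real condition
            msgs.append(block.data['feedback']['start'])
        return {'overall_feedback': msgs}              # shape returned via the get_user_state handler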