Refactor AutoGradingService as a real service #6694

Merged · 1 commit · Sep 23, 2024
4 changes: 4 additions & 0 deletions lms/services/__init__.py
@@ -1,6 +1,7 @@
 from lms.services.aes import AESService
 from lms.services.application_instance import ApplicationInstanceNotFound
 from lms.services.assignment import AssignmentService
+from lms.services.auto_grading import AutoGradingService
 from lms.services.canvas import CanvasService
 from lms.services.canvas_studio import CanvasStudioService
 from lms.services.d2l_api.client import D2LAPIClient
@@ -151,6 +152,9 @@ def includeme(config):
     )
     config.register_service_factory(MoodleAPIClient.factory, iface=MoodleAPIClient)
     config.register_service_factory("lms.services.roster.factory", iface=RosterService)
+    config.register_service_factory(
+        "lms.services.auto_grading.factory", iface=AutoGradingService
+    )
 
     # Plugins are not the same as top level services but we want to register them as pyramid services too
     # Importing them here to:
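With the factory registered, any request-bound code can resolve the service through Pyramid's service registry. A minimal sketch, assuming a hypothetical view and route (the find_service() call mirrors the dashboard view change later in this diff; the context attribute is illustrative):

from pyramid.view import view_config

from lms.services import AutoGradingService


@view_config(route_name="example.grade", renderer="json")  # hypothetical route
def grade_view(request):
    # Pyramid resolves the instance built by lms.services.auto_grading.factory.
    svc: AutoGradingService = request.find_service(AutoGradingService)
    config = request.context.auto_grading_config  # hypothetical context attribute
    # AnnotationMetrics is a dict-style mapping in this codebase, so a plain dict works.
    metrics = {"annotations": 5, "replies": 5}
    return {"grade": svc.calculate_grade(config, metrics)}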
108 changes: 60 additions & 48 deletions lms/services/auto_grading.py
@@ -2,53 +2,65 @@
 from lms.models import AutoGradingConfig
 
 
-def calculate_grade(
    [Review comment from the Member Author: Moved to a method (instead of function).]
-    auto_grading_config: AutoGradingConfig, annotation_metrics: AnnotationMetrics
-) -> float:
-    """Calculate auto grades based on the config and the number of annotations made.
-
-    The result is a 100-based float rounded to two decimals.
-    """
-    combined_count = annotation_metrics["annotations"] + annotation_metrics["replies"]
-
-    grade: float = 0
-    match (auto_grading_config.grading_type, auto_grading_config.activity_calculation):
-        case ("all_or_nothing", "cumulative"):
-            if combined_count >= auto_grading_config.required_annotations:
-                grade = 100
-            else:
-                grade = 0
-        case ("all_or_nothing", "separate"):
-            assert (
-                auto_grading_config.required_replies is not None
-            ), "'separate' auto grade config with empty replies"
-            if (
-                annotation_metrics["annotations"]
-                >= auto_grading_config.required_annotations
-                and annotation_metrics["replies"]
-                >= auto_grading_config.required_replies
-            ):
-                grade = 100
-            else:
-                grade = 0
-
-        case ("scaled", "cumulative"):
-            grade = combined_count / auto_grading_config.required_annotations * 100
-
-        case ("scaled", "separate"):
-            assert (
-                auto_grading_config.required_replies is not None
-            ), "'separate' auto grade config with empty replies"
-            grade = (
-                combined_count
-                / (
-                    auto_grading_config.required_annotations
-                    + auto_grading_config.required_replies
-                )
-                * 100
-            )
-        case _:
-            raise ValueError("Unknown auto grading configuration")
-
-    grade = min(100, grade)  # Proportional grades are capped at 100%
-    return round(grade, 2)
+class AutoGradingService:
+    def calculate_grade(
+        self,
+        auto_grading_config: AutoGradingConfig,
+        annotation_metrics: AnnotationMetrics,
+    ) -> float:
+        """Calculate auto grades based on the config and the number of annotations made.
+
+        The result is a 100-based float rounded to two decimals.
+        """
+        combined_count = (
+            annotation_metrics["annotations"] + annotation_metrics["replies"]
+        )
+
+        grade: float = 0
+        match (
+            auto_grading_config.grading_type,
+            auto_grading_config.activity_calculation,
+        ):
+            case ("all_or_nothing", "cumulative"):
+                if combined_count >= auto_grading_config.required_annotations:
+                    grade = 100
+                else:
+                    grade = 0
+            case ("all_or_nothing", "separate"):
+                assert (
+                    auto_grading_config.required_replies is not None
+                ), "'separate' auto grade config with empty replies"
+                if (
+                    annotation_metrics["annotations"]
+                    >= auto_grading_config.required_annotations
+                    and annotation_metrics["replies"]
+                    >= auto_grading_config.required_replies
+                ):
+                    grade = 100
+                else:
+                    grade = 0
+
+            case ("scaled", "cumulative"):
+                grade = combined_count / auto_grading_config.required_annotations * 100
+
+            case ("scaled", "separate"):
+                assert (
+                    auto_grading_config.required_replies is not None
+                ), "'separate' auto grade config with empty replies"
+                grade = (
+                    combined_count
+                    / (
+                        auto_grading_config.required_annotations
+                        + auto_grading_config.required_replies
+                    )
+                    * 100
+                )
+            case _:
+                raise ValueError("Unknown auto grading configuration")
+
+        grade = min(100, grade)  # Proportional grades are capped at 100%
+        return round(grade, 2)
+
+
+def factory(_context, _request):
+    return AutoGradingService()
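For orientation, a minimal usage sketch of the refactored service (not part of the PR); the values are borrowed from the test table below, and the arithmetic checks out: (5 + 5) / 15 × 100 = 66.67.

from lms.js_config_types import AnnotationMetrics
from lms.models import AutoGradingConfig
from lms.services.auto_grading import AutoGradingService

svc = AutoGradingService()
grade = svc.calculate_grade(
    AutoGradingConfig(
        grading_type="scaled",
        activity_calculation="cumulative",
        required_annotations=15,
        required_replies=None,
    ),
    AnnotationMetrics(annotations=5, replies=5),
)
assert grade == 66.67  # (5 + 5) / 15 * 100, rounded to two decimals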
12 changes: 9 additions & 3 deletions lms/views/dashboard/api/user.py
@@ -7,7 +7,7 @@
 from lms.models import RoleScope, RoleType, User
 from lms.security import Permissions
 from lms.services import UserService
-from lms.services.auto_grading import calculate_grade
+from lms.services.auto_grading import AutoGradingService
 from lms.services.h_api import HAPI
 from lms.validation._base import PyramidRequestSchema
 from lms.views.dashboard.pagination import PaginationParametersMixin, get_page
@@ -64,6 +64,9 @@ def __init__(self, request) -> None:
         self.dashboard_service = request.find_service(name="dashboard")
         self.h_api = request.find_service(HAPI)
         self.user_service: UserService = request.find_service(UserService)
+        self.auto_grading_service: AutoGradingService = request.find_service(
+            AutoGradingService
+        )
 
     @view_config(
         route_name="api.dashboard.students",
@@ -186,8 +189,11 @@ def students_metrics(self) -> APIStudents:
             )
 
             if assignment.auto_grading_config:
-                api_student["auto_grading_grade"] = calculate_grade(
-                    assignment.auto_grading_config, api_student["annotation_metrics"]
+                api_student["auto_grading_grade"] = (
+                    self.auto_grading_service.calculate_grade(
+                        assignment.auto_grading_config,
+                        api_student["annotation_metrics"],
+                    )
                 )
 
             students.append(api_student)
108 changes: 64 additions & 44 deletions tests/unit/lms/services/auto_grading_test.py
@@ -1,53 +1,73 @@
+from unittest.mock import sentinel
+
 import pytest
 
 from lms.js_config_types import AnnotationMetrics
 from lms.models import AutoGradingConfig
-from lms.services.auto_grading import calculate_grade
-
-
-@pytest.mark.parametrize(
-    "grading_type,activity_calculation,required_annotations, required_replies,annotations,replies,expected_grade",
-    [
-        ("all_or_nothing", "cumulative", 15, None, 5, 5, 0),
-        ("all_or_nothing", "cumulative", 15, None, 10, 6, 100),
-        ("all_or_nothing", "separate", 10, 5, 10, 4, 0),
-        ("all_or_nothing", "separate", 10, 5, 10, 5, 100),
-        ("scaled", "cumulative", 15, None, 5, 5, 66.67),
-        ("scaled", "cumulative", 15, None, 10, 10, 100),
-        ("scaled", "separate", 10, 5, 8, 2, 66.67),
-        ("scaled", "separate", 10, 5, 5, 1, 40),
-    ],
-)
-def test_calculate_grade(
-    activity_calculation,
-    grading_type,
-    required_annotations,
-    required_replies,
-    annotations,
-    replies,
-    expected_grade,
-):
-    grade = calculate_grade(
-        AutoGradingConfig(
-            activity_calculation=activity_calculation,
-            grading_type=grading_type,
-            required_annotations=required_annotations,
-            required_replies=required_replies,
-        ),
-        AnnotationMetrics(annotations=annotations, replies=replies),
-    )
-
-    assert grade == expected_grade
-
-
-def test_calculate_grade_bad_config():
-    with pytest.raises(ValueError):
-        calculate_grade(
-            AutoGradingConfig(
-                activity_calculation="RANDOM",
-                grading_type="RANDOM",
-                required_annotations=10,
-                required_replies=10,
-            ),
-            AnnotationMetrics(annotations=10, replies=10),
-        )
+from lms.services.auto_grading import AutoGradingService, factory
+
+
+class TestAutoGradingService:
+    @pytest.mark.parametrize(
+        "grading_type,activity_calculation,required_annotations, required_replies,annotations,replies,expected_grade",
+        [
+            ("all_or_nothing", "cumulative", 15, None, 5, 5, 0),
+            ("all_or_nothing", "cumulative", 15, None, 10, 6, 100),
+            ("all_or_nothing", "separate", 10, 5, 10, 4, 0),
+            ("all_or_nothing", "separate", 10, 5, 10, 5, 100),
+            ("scaled", "cumulative", 15, None, 5, 5, 66.67),
+            ("scaled", "cumulative", 15, None, 10, 10, 100),
+            ("scaled", "separate", 10, 5, 8, 2, 66.67),
+            ("scaled", "separate", 10, 5, 5, 1, 40),
+        ],
+    )
+    def test_calculate_grade(
+        self,
+        activity_calculation,
+        grading_type,
+        required_annotations,
+        required_replies,
+        annotations,
+        replies,
+        expected_grade,
+        svc,
+    ):
+        grade = svc.calculate_grade(
+            AutoGradingConfig(
+                activity_calculation=activity_calculation,
+                grading_type=grading_type,
+                required_annotations=required_annotations,
+                required_replies=required_replies,
+            ),
+            AnnotationMetrics(annotations=annotations, replies=replies),
+        )
+
+        assert grade == expected_grade
+
+    def test_calculate_grade_bad_config(self, svc):
+        with pytest.raises(ValueError):
+            svc.calculate_grade(
+                AutoGradingConfig(
+                    activity_calculation="RANDOM",
+                    grading_type="RANDOM",
+                    required_annotations=10,
+                    required_replies=10,
+                ),
+                AnnotationMetrics(annotations=10, replies=10),
+            )
+
+    @pytest.fixture
+    def svc(self):
+        return AutoGradingService()
+
+
+class TestFactory:
+    def test_it(self, pyramid_request, AutoGradingService):
+        service = factory(sentinel.context, pyramid_request)
+
+        AutoGradingService.assert_called_once_with()
+        assert service == AutoGradingService.return_value
+
+    @pytest.fixture
+    def AutoGradingService(self, patch):
+        return patch("lms.services.auto_grading.AutoGradingService")
18 changes: 10 additions & 8 deletions tests/unit/lms/views/dashboard/api/user_test.py
Expand Up @@ -9,7 +9,11 @@
 from tests import factories
 
 pytestmark = pytest.mark.usefixtures(
-    "h_api", "assignment_service", "dashboard_service", "user_service"
+    "h_api",
+    "assignment_service",
+    "dashboard_service",
+    "user_service",
+    "auto_grading_service",
 )
 

@@ -65,7 +69,7 @@ def test_students_metrics(  # pylint:disable=too-many-locals
         db_session,
         with_auto_grading,
         with_segment_authority_provided_id,
-        calculate_grade,
+        auto_grading_service,
     ):
         # User returned by the stats endpoint
         student = factories.User(display_name="Bart")
@@ -174,12 +178,14 @@
         if with_auto_grading:
             calls = []
             for student in expected["students"]:
-                student["auto_grading_grade"] = calculate_grade.return_value
+                student["auto_grading_grade"] = (
+                    auto_grading_service.calculate_grade.return_value
+                )
                 calls.append(
                     call(assignment.auto_grading_config, student["annotation_metrics"])
                 )
 
-            calculate_grade.assert_has_calls(calls)
+            auto_grading_service.calculate_grade.assert_has_calls(calls)
 
         assert response == expected
 
@@ -190,7 +196,3 @@ def views(self, pyramid_request):
     @pytest.fixture
     def get_page(self, patch):
         return patch("lms.views.dashboard.api.user.get_page")
-
-    @pytest.fixture
-    def calculate_grade(self, patch):
    [Review comment from the Member Author: Using the service mock instead of a new function mock.]
-        return patch("lms.views.dashboard.api.user.calculate_grade")
7 changes: 7 additions & 0 deletions tests/unit/services.py
Expand Up @@ -14,6 +14,7 @@
 from lms.services.application_instance import ApplicationInstanceService
 from lms.services.assignment import AssignmentService
 from lms.services.async_oauth_http import AsyncOAuthHTTPService
+from lms.services.auto_grading import AutoGradingService
 from lms.services.blackboard_api.client import BlackboardAPIClient
 from lms.services.canvas_api import CanvasAPIClient
 from lms.services.canvas_studio import CanvasStudioService
@@ -63,6 +64,7 @@
"application_instance_service",
"assignment_service",
"async_oauth_http_service",
"auto_grading_service",
"blackboard_api_client",
"canvas_api_client",
"canvas_service",
@@ -194,6 +196,11 @@ def roster_service(mock_service):
     return mock_service(RosterService)
 
 
+@pytest.fixture
+def auto_grading_service(mock_service):
+    return mock_service(AutoGradingService)
+
+
 @pytest.fixture
 def d2l_api_client(mock_service):
     return mock_service(D2LAPIClient)
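The fixture returns a mock standing in for AutoGradingService, so tests can stub calculate_grade directly, as user_test.py above does. A sketch of the pattern, assuming the usual mock_service behavior (the test itself is illustrative, not part of the PR):

from unittest.mock import sentinel


def test_grade_is_attached(auto_grading_service):
    # Stub the mocked service method for this test.
    auto_grading_service.calculate_grade.return_value = 95.0

    result = auto_grading_service.calculate_grade(sentinel.config, sentinel.metrics)

    assert result == 95.0
    auto_grading_service.calculate_grade.assert_called_once_with(
        sentinel.config, sentinel.metrics
    )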