Adding Unit Tests for dl_eval.py (#854)
* Adding Unit Tests for dl_eval.py

This pull request introduces unit tests for the compute_correct and compute_accuracy functions. The tests use the pytest framework with small dummy tensors to simulate a range of input conditions; a sketch of the functions under test appears before the diff below.

* 🎨 Format Python code with psf/black

* Added additional unit tests for edge cases

Added unit tests for an accuracy of 0 and for an accuracy between 0 and 1

* Adding unit tests for edge cases

* Fixing indentation

* Added pytest parameterized tests

* 🎨 Format Python code with psf/black (a typical invocation is shown below)
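For reference, this formatting step can be reproduced locally with a plain black invocation (assuming black is installed in the environment):

black tests/test_dl_eval.py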

---------

Co-authored-by: yeaitsurya <yeaitsurya@users.noreply.github.com>
codingwithsurya and yeaitsurya authored Jul 8, 2023
1 parent 9f7dfad commit ad5ea3d
Showing 1 changed file with 42 additions and 0 deletions.
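For context, compute_correct and compute_accuracy are imported from backend.dl.dl_eval but their bodies do not appear in this diff. Below is a minimal sketch of plausible implementations, assuming each row of predicted holds per-class scores and the predicted label is the argmax of that row; the actual code in the repository may differ.

import torch


def compute_correct(predicted: torch.Tensor, actual: torch.Tensor) -> int:
    # Argmax over the class dimension gives the predicted label for each row;
    # count how many predicted labels match the ground-truth labels.
    return (predicted.argmax(dim=1) == actual).sum().item()


def compute_accuracy(predicted: torch.Tensor, actual: torch.Tensor) -> float:
    # Accuracy is the fraction of rows whose predicted label is correct.
    return compute_correct(predicted, actual) / actual.numel()

Under these assumptions, the expected values in the tests below check out: for example, the row-wise argmax of [[0.8, 0.1, 0.1], [0.2, 0.4, 0.4]] is [0, 1], so against labels [0, 0] exactly one prediction is correct, giving an accuracy of 0.5.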
42 changes: 42 additions & 0 deletions tests/test_dl_eval.py
@@ -0,0 +1,42 @@
import torch
import pytest
import torch.nn as nn  # imported in the committed file but unused by these tests
from backend.dl.dl_eval import compute_correct, compute_accuracy


@pytest.mark.parametrize(
    "predicted, actual, expected_correct",
    [
        # Test case: All correct predictions
        (torch.tensor([[0.1, 0.2, 0.7], [0.3, 0.4, 0.3]]), torch.tensor([2, 1]), 2),
        # Test case: Some correct predictions
        (torch.tensor([[0.8, 0.1, 0.1], [0.2, 0.4, 0.4]]), torch.tensor([0, 0]), 1),
        # Test case: No correct predictions
        (torch.tensor([[0.9, 0.1, 0.0], [0.7, 0.2, 0.1]]), torch.tensor([2, 1]), 0),
    ],
)
def test_compute_correct(predicted, actual, expected_correct):
    # Compute the number of correct predictions
    correct = compute_correct(predicted, actual)

    # Check that the count matches the expected number of correct predictions
    assert correct == expected_correct


@pytest.mark.parametrize(
    "predicted, actual, expected_accuracy",
    [
        # Test case: Accuracy of 1
        (torch.tensor([[0.1, 0.2, 0.7], [0.3, 0.4, 0.3]]), torch.tensor([2, 1]), 1),
        # Test case: Accuracy between 0 and 1
        (torch.tensor([[0.8, 0.1, 0.1], [0.2, 0.4, 0.4]]), torch.tensor([0, 0]), 0.5),
        # Test case: Accuracy of 0
        (torch.tensor([[0.9, 0.1, 0.0], [0.7, 0.2, 0.1]]), torch.tensor([2, 1]), 0),
    ],
)
def test_compute_accuracy(predicted, actual, expected_accuracy):
    # Compute the accuracy
    accuracy = compute_accuracy(predicted, actual)

    # Check that the accuracy matches the expected value
    assert accuracy == expected_accuracy
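These tests can be run locally with pytest from the repository root (assuming pytest is installed and the backend package is importable from there):

pytest tests/test_dl_eval.py -v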
