From ad5ea3d522915afc00eece50e2b48d59cb3cf4b1 Mon Sep 17 00:00:00 2001
From: Surya Subramanian <73920354+yeaitsurya@users.noreply.github.com>
Date: Fri, 7 Jul 2023 19:34:05 -0700
Subject: [PATCH] Adding Unit Tests for dl_eval.py (#854)

* Adding Unit Tests for dl_eval.py

This pull request introduces unit tests for the compute_correct and
compute_accuracy functions. The tests use the pytest framework and
dummy tensors to simulate various input conditions.

* :art: Format Python code with psf/black

* Added additional unit tests for edge cases

Added unit tests for an accuracy of 0 and an accuracy between 0 and 1

* Adding unit tests for edge cases

* Fixing indentation

* Added pytest parametrized tests

* :art: Format Python code with psf/black

---------

Co-authored-by: yeaitsurya
---
 tests/test_dl_eval.py | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 tests/test_dl_eval.py

diff --git a/tests/test_dl_eval.py b/tests/test_dl_eval.py
new file mode 100644
index 000000000..7a1014418
--- /dev/null
+++ b/tests/test_dl_eval.py
@@ -0,0 +1,42 @@
+import torch
+import pytest
+import torch.nn as nn
+from backend.dl.dl_eval import compute_correct, compute_accuracy
+
+
+@pytest.mark.parametrize(
+    "predicted, actual, expected_correct",
+    [
+        # Test case: all correct predictions
+        (torch.tensor([[0.1, 0.2, 0.7], [0.3, 0.4, 0.3]]), torch.tensor([2, 1]), 2),
+        # Test case: some correct predictions
+        (torch.tensor([[0.8, 0.1, 0.1], [0.2, 0.4, 0.4]]), torch.tensor([0, 0]), 1),
+        # Test case: no correct predictions
+        (torch.tensor([[0.9, 0.1, 0.0], [0.7, 0.2, 0.1]]), torch.tensor([2, 1]), 0),
+    ],
+)
+def test_compute_correct(predicted, actual, expected_correct):
+    # Compute the number of correct predictions
+    correct = compute_correct(predicted, actual)
+
+    # Check that the count matches the expected value
+    assert correct == expected_correct
+
+
+@pytest.mark.parametrize(
+    "predicted, actual, expected_accuracy",
+    [
+        # Test case: accuracy of 1
+        (torch.tensor([[0.1, 0.2, 0.7], [0.3, 0.4, 0.3]]), torch.tensor([2, 1]), 1),
+        # Test case: accuracy between 0 and 1
+        (torch.tensor([[0.8, 0.1, 0.1], [0.2, 0.4, 0.4]]), torch.tensor([0, 0]), 0.5),
+        # Test case: accuracy of 0
+        (torch.tensor([[0.9, 0.1, 0.0], [0.7, 0.2, 0.1]]), torch.tensor([2, 1]), 0),
+    ],
+)
+def test_compute_accuracy(predicted, actual, expected_accuracy):
+    # Compute the accuracy
+    accuracy = compute_accuracy(predicted, actual)
+
+    # Check that the accuracy matches the expected value
+    assert accuracy == expected_accuracy
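
Note: the patch exercises compute_correct and compute_accuracy but does not show
their implementations. A minimal sketch of what backend/dl/dl_eval.py plausibly
contains, assuming `predicted` is an (N, C) tensor of per-class scores,
`actual` is an (N,) tensor of integer labels, and accuracy is the fraction of
argmax matches (the project's actual implementation may differ):

import torch


def compute_correct(predicted: torch.Tensor, actual: torch.Tensor) -> int:
    # Hypothetical sketch, not part of this patch: take the highest-scoring
    # class per row and count how many match the true labels.
    return int((torch.argmax(predicted, dim=1) == actual).sum().item())


def compute_accuracy(predicted: torch.Tensor, actual: torch.Tensor) -> float:
    # Hypothetical sketch: fraction of rows whose argmax equals the label.
    return compute_correct(predicted, actual) / actual.size(0)

Under that reading, the parametrized cases line up: in the "some correct" case,
the first row's argmax is 0 and matches its label, while the second row ties at
0.4 between indices 1 and 2, neither of which matches the label 0, giving 1
correct out of 2 and an accuracy of 0.5. With the new file in place, the suite
runs from the repository root with: pytest tests/test_dl_eval.py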