diff --git a/tests/unit/algorithms/classification/adapters/mmcls/test_configurer.py b/tests/unit/algorithms/classification/adapters/mmcls/test_configurer.py
new file mode 100644
index 00000000000..86a65df1a4e
--- /dev/null
+++ b/tests/unit/algorithms/classification/adapters/mmcls/test_configurer.py
@@ -0,0 +1,266 @@
+import copy
+import os
+import tempfile
+
+import pytest
+from mmcv.utils import ConfigDict
+
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.classification.adapters.mmcls.configurer import (
+    ClassificationConfigurer,
+    IncrClassificationConfigurer,
+    SemiSLClassificationConfigurer,
+)
+from tests.test_suite.e2e_test_system import e2e_pytest_unit
+from tests.unit.algorithms.classification.test_helper import DEFAULT_CLS_TEMPLATE_DIR
+
+
+class TestClassificationConfigurer:
+    @pytest.fixture(autouse=True)
+    def setup(self) -> None:
+        self.configurer = ClassificationConfigurer()
+        self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "model.py"))
+        self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "data_pipeline.py"))
+
+    @e2e_pytest_unit
+    def test_configure(self, mocker):
+        mock_cfg_base = mocker.patch.object(ClassificationConfigurer, "configure_base")
+        mock_cfg_device = mocker.patch.object(ClassificationConfigurer, "configure_device")
+        mock_cfg_ckpt = mocker.patch.object(ClassificationConfigurer, "configure_ckpt")
+        mock_cfg_model = mocker.patch.object(ClassificationConfigurer, "configure_model")
+        mock_cfg_data = mocker.patch.object(ClassificationConfigurer, "configure_data")
+        mock_cfg_task = mocker.patch.object(ClassificationConfigurer, "configure_task")
+        mock_cfg_hook = mocker.patch.object(ClassificationConfigurer, "configure_hook")
+        mock_cfg_gpu = mocker.patch.object(ClassificationConfigurer, "configure_samples_per_gpu")
+        mock_cfg_fp16_optimizer = mocker.patch.object(ClassificationConfigurer, "configure_fp16_optimizer")
+        mock_cfg_compat_cfg = mocker.patch.object(ClassificationConfigurer, "configure_compat_cfg")
+
+        model_cfg = copy.deepcopy(self.model_cfg)
+        data_cfg = copy.deepcopy(self.data_cfg)
+        returned_value = self.configurer.configure(model_cfg, "", data_cfg, True)
+        mock_cfg_base.assert_called_once_with(model_cfg, data_cfg, None, None)
+        mock_cfg_device.assert_called_once_with(model_cfg, True)
+        mock_cfg_ckpt.assert_called_once_with(model_cfg, "")
+        mock_cfg_model.assert_called_once_with(model_cfg, None)
+        mock_cfg_data.assert_called_once_with(model_cfg, True, data_cfg)
+        mock_cfg_task.assert_called_once_with(model_cfg, True)
+        mock_cfg_hook.assert_called_once_with(model_cfg)
+        mock_cfg_gpu.assert_called_once_with(model_cfg, "train")
+        mock_cfg_fp16_optimizer.assert_called_once_with(model_cfg)
+        mock_cfg_compat_cfg.assert_called_once_with(model_cfg)
+        assert returned_value == model_cfg
+
+    @e2e_pytest_unit
+    def test_configure_base(self, mocker):
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.configurer.align_data_config_with_recipe",
+            return_value=True,
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.configurer.patch_datasets",
+            return_value=True,
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.configurer.patch_persistent_workers",
+            return_value=True,
+        )
+
+        model_cfg = copy.deepcopy(self.model_cfg)
+        data_cfg = copy.deepcopy(self.data_cfg._cfg_dict)
+        self.configurer.configure_base(model_cfg, data_cfg, [], [])
+
+    @e2e_pytest_unit
+    def test_configure_device(self, mocker):
+        mocker.patch(
"torch.distributed.is_initialized", + return_value=True, + ) + mocker.patch("os.environ", return_value={"LOCAL_RANK": 2}) + config = copy.deepcopy(self.model_cfg) + self.configurer.configure_device(config, True) + assert config.distributed is True + + mocker.patch( + "torch.distributed.is_initialized", + return_value=False, + ) + mocker.patch( + "torch.cuda.is_available", + return_value=False, + ) + config = copy.deepcopy(self.model_cfg) + self.configurer.configure_device(config, True) + assert config.distributed is False + assert config.device == "cpu" + + mocker.patch( + "torch.distributed.is_initialized", + return_value=False, + ) + mocker.patch( + "torch.cuda.is_available", + return_value=True, + ) + config = copy.deepcopy(self.model_cfg) + self.configurer.configure_device(config, True) + assert config.distributed is False + assert config.device == "cuda" + + @e2e_pytest_unit + def test_configure_model(self): + ir_options = {"ir_model_path": {"ir_weight_path": "", "ir_weight_init": ""}} + self.model_cfg.model.head.in_channels = -1 + self.configurer.configure_model(self.model_cfg, ir_options) + assert self.model_cfg.model_task + assert self.model_cfg.model.head.in_channels == 960 + + @e2e_pytest_unit + def test_configure_model_not_classification_task(self): + ir_options = {"ir_model_path": {"ir_weight_path": "", "ir_weight_init": ""}} + configure_cfg = copy.deepcopy(self.model_cfg) + configure_cfg.model.task = "detection" + with pytest.raises(ValueError): + self.configurer.configure_model(configure_cfg, ir_options) + + @e2e_pytest_unit + def test_configure_ckpt(self, mocker): + model_cfg = copy.deepcopy(self.model_cfg) + model_cfg.resume = True + + mocker.patch( + "otx.algorithms.classification.adapters.mmcls.configurer.CheckpointLoader.load_checkpoint", + return_value={"model": None}, + ) + with tempfile.TemporaryDirectory() as tempdir: + self.configurer.configure_ckpt(model_cfg, os.path.join(tempdir, "dummy.pth")) + + @e2e_pytest_unit + def test_configure_data(self, mocker): + data_cfg = copy.deepcopy(self.data_cfg) + data_cfg.data.pipeline_options = dict( + MinIouRandomCrop=dict(min_crop_size=0.1), + Resize=dict( + img_scale=[(1344, 480), (1344, 960)], + multiscale_mode="range", + ), + Normalize=dict(), + MultiScaleFlipAug=dict( + img_scale=(1344, 800), + flip=False, + transforms=[ + dict(type="Resize", keep_ratio=False), + dict(type="Normalize"), + dict(type="Pad", size_divisor=32), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), + ) + self.configurer.configure_data(self.model_cfg, True, data_cfg) + assert self.model_cfg.data + assert self.model_cfg.data.train + assert self.model_cfg.data.val + + @e2e_pytest_unit + def test_configure_task(self, mocker): + model_cfg = copy.deepcopy(self.model_cfg) + model_cfg.update(self.data_cfg) + model_cfg.task_adapt = {"type": "mpa", "op": "REPLACE", "use_mpa_anchor": True} + self.configurer.configure_task(model_cfg, True) + + self.configurer.model_classes = [] + self.configurer.data_classes = ["red", "green"] + self.configurer.configure_task(model_cfg, True) + + @e2e_pytest_unit + def test_configure_hook(self): + model_cfg = copy.deepcopy(self.model_cfg) + model_cfg.custom_hooks = [{"type": "LazyEarlyStoppingHook", "start": 3}] + model_cfg.custom_hook_options = {"LazyEarlyStoppingHook": {"start": 5}, "LoggerReplaceHook": {"_delete_": True}} + self.configurer.configure_hook(model_cfg) + assert model_cfg.custom_hooks[0]["start"] == 5 + + @e2e_pytest_unit + def test_configure_samples_per_gpu(self): + 
+        model_cfg = copy.deepcopy(self.model_cfg)
+        model_cfg.update(self.data_cfg)
+        model_cfg.data.train.otx_dataset = range(1)
+        self.configurer.configure_samples_per_gpu(model_cfg, "train")
+        assert model_cfg.data.train_dataloader == {"samples_per_gpu": 1, "drop_last": True}
+
+    @e2e_pytest_unit
+    def test_configure_fp16_optimizer(self):
+        model_cfg = copy.deepcopy(self.model_cfg)
+        model_cfg.fp16 = {}
+        model_cfg.optimizer_config.type = "OptimizerHook"
+        self.configurer.configure_fp16_optimizer(model_cfg)
+        assert model_cfg.optimizer_config.type == "Fp16OptimizerHook"
+
+        model_cfg.fp16 = {}
+        model_cfg.optimizer_config.type = "SAMOptimizerHook"
+        self.configurer.configure_fp16_optimizer(model_cfg)
+        assert model_cfg.optimizer_config.type == "Fp16SAMOptimizerHook"
+
+        model_cfg.fp16 = {}
+        model_cfg.optimizer_config.type = "DummyOptimizerHook"
+        self.configurer.configure_fp16_optimizer(model_cfg)
+        assert model_cfg.optimizer_config.type == "DummyOptimizerHook"
+
+    @e2e_pytest_unit
+    def test_configure_compat_cfg(self):
+        model_cfg = copy.deepcopy(self.model_cfg)
+        model_cfg.update(self.data_cfg)
+        model_cfg.data.train_dataloader = {}
+        model_cfg.data.val_dataloader = {}
+        model_cfg.data.test_dataloader = {}
+        self.configurer.configure_compat_cfg(model_cfg)
+
+    @e2e_pytest_unit
+    def test_get_data_cfg(self):
+        config = copy.deepcopy(self.model_cfg)
+        config.update(self.data_cfg)
+        config.data.train.dataset = ConfigDict({"dataset": [1, 2, 3]})
+        assert [1, 2, 3] == self.configurer.get_data_cfg(config, "train")
+
+
+class TestIncrClassificationConfigurer:
+    @pytest.fixture(autouse=True)
+    def setup(self) -> None:
+        self.configurer = IncrClassificationConfigurer()
+        self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "model.py"))
+        self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "data_pipeline.py"))
+
+    def test_configure_task(self, mocker):
+        mocker.patch.object(ClassificationConfigurer, "configure_task")
+        self.model_cfg.update(self.data_cfg)
+        self.model_cfg.task_adapt = {}
+        self.configurer.task_adapt_type = "mpa"
+        self.configurer.configure_task(self.model_cfg, True)
+        assert self.model_cfg.custom_hooks[0].type == "TaskAdaptHook"
+        assert self.model_cfg.custom_hooks[0].sampler_flag is False
+
+
+class TestSemiSLClassificationConfigurer:
+    @pytest.fixture(autouse=True)
+    def setup(self) -> None:
+        self.configurer = SemiSLClassificationConfigurer()
+        self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "model.py"))
+        self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "data_pipeline.py"))
+
+    def test_configure_data(self, mocker):
+        mocker.patch.object(ClassificationConfigurer, "configure_data")
+        mocker.patch("mmdet.datasets.build_dataset", return_value=[])
+        mocker.patch("otx.algorithms.classification.adapters.mmcls.configurer.build_dataloader", return_value=[])
+        self.model_cfg.update(self.data_cfg)
+        self.model_cfg.data.unlabeled = ConfigDict({"type": "OTXDataset", "otx_dataset": range(10)})
+        self.model_cfg.model_task = "detection"
+        self.model_cfg.distributed = False
+        self.configurer.configure_data(self.model_cfg, True, self.data_cfg)
+
+    def test_configure_task(self):
+        self.model_cfg.update(self.data_cfg)
+        self.model_cfg.task_adapt = {"type": "mpa", "op": "REPLACE", "use_mpa_anchor": True}
+        self.configurer.configure_task(self.model_cfg, True)
+
+        self.model_cfg.task_adapt = {"type": "not_mpa", "op": "REPLACE", "use_mpa_anchor": True}
+        self.configurer.configure_task(self.model_cfg, True)
diff --git a/tests/unit/algorithms/classification/adapters/mmcls/test_task.py b/tests/unit/algorithms/classification/adapters/mmcls/test_task.py
new file mode 100644
index 00000000000..2e908102825
--- /dev/null
+++ b/tests/unit/algorithms/classification/adapters/mmcls/test_task.py
@@ -0,0 +1,411 @@
+"""Unit Test for otx.algorithms.classification.adapters.mmcls.task."""
+
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import json
+import os
+from contextlib import nullcontext
+from copy import deepcopy
+from typing import Any, Dict
+
+import numpy as np
+import pytest
+import torch
+from mmcv.utils import Config
+from torch import nn
+
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.classification.adapters.mmcls.task import MMClassificationTask
+from otx.algorithms.classification.adapters.mmcls.models.classifiers.sam_classifier import SAMImageClassifier
+from otx.algorithms.classification.configs.base import ClassificationConfig
+from otx.api.configuration import ConfigurableParameters
+from otx.api.configuration.helper import create
+from otx.api.entities.dataset_item import DatasetItemEntity
+from otx.api.entities.datasets import DatasetEntity
+from otx.api.entities.explain_parameters import ExplainParameters
+from otx.api.entities.inference_parameters import InferenceParameters
+from otx.api.entities.label import Domain
+from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity
+from otx.api.entities.model import (
+    ModelConfiguration,
+    ModelEntity,
+    ModelFormat,
+    ModelOptimizationType,
+    ModelPrecision,
+)
+from otx.api.entities.model_template import InstantiationType, parse_model_template, TaskFamily, TaskType
+from otx.api.entities.resultset import ResultSetEntity
+from otx.api.usecases.tasks.interfaces.export_interface import ExportType
+from tests.test_suite.e2e_test_system import e2e_pytest_unit
+from tests.unit.algorithms.classification.test_helper import (
+    DEFAULT_CLS_TEMPLATE_DIR,
+    init_environment,
+    generate_cls_dataset,
+    generate_label_schema,
+)
+
+
+class MockModule(nn.Module):
+    """Mock class for nn.Module."""
+
+    def forward(self, inputs: Any):
+        return inputs
+
+
+class MockModel(nn.Module):
+    """Mock class for pytorch model."""
+
+    def __init__(self):
+        super().__init__()
+        self.module = MockModule()
+        self.module.backbone = MockModule()
+        self.backbone = MockModule()
+
+    def forward(self, *args, **kwargs):
+        forward_hooks = list(self.module.backbone._forward_hooks.values())
+        for hook in forward_hooks:
+            hook(1, 2, 3)
+        return np.array([[0.3, 0.7]])
+
+    @staticmethod
+    def named_parameters(*args, **kwargs):
+        return {"name": torch.Tensor([0.5])}.items()
+
+
+class MockDataset(DatasetEntity):
+    """Mock class for mm_dataset."""
+
+    def __init__(self, dataset: DatasetEntity):
+        self.dataset = dataset
+        self.CLASSES = ["1", "2", "3"]
+
+    def __len__(self):
+        return len(self.dataset)
+
+    def evaluate(self, prediction, *args, **kwargs):
+        return {"mAP": 1.0}
+
+
+class MockDataLoader:
+    """Mock class for data loader."""
+
+    def __init__(self, dataset: DatasetEntity):
+        self.dataset = dataset
+        self.iter = iter(self.dataset)
+
+    def __len__(self) -> int:
+        return len(self.dataset)
+
+    def __next__(self) -> Dict[str, DatasetItemEntity]:
+        return {"imgs": next(self.iter)}
+
+    def __iter__(self):
+        return self
+
+
+class MockExporter:
+    """Mock class for Exporter."""
+
+    def __init__(self, task):
+        self._output_path = task._output_path
+
+    def run(self, *args, **kwargs):
+        with open(os.path.join(self._output_path, "openvino.bin"), "wb") as f:
+            f.write(np.ndarray([0]))
+        with open(os.path.join(self._output_path, "openvino.xml"), "wb") as f:
+            f.write(np.ndarray([0]))
+        with open(os.path.join(self._output_path, "model.onnx"), "wb") as f:
+            f.write(np.ndarray([0]))
+
+        return {
+            "outputs": {
+                "bin": os.path.join(self._output_path, "openvino.bin"),
+                "xml": os.path.join(self._output_path, "openvino.xml"),
+                "onnx": os.path.join(self._output_path, "model.onnx"),
+            }
+        }
+
+
+class TestMMClassificationTask:
+    """Test class for MMClassificationTask.
+
+    Details are explained in each test function.
+    """
+
+    @pytest.fixture(autouse=True)
+    def setup(self) -> None:
+        model_template = parse_model_template(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "template.yaml"))
+        hyper_parameters = create(model_template.hyper_parameters.data)
+
+        mc_task_env, self.mc_cls_dataset = init_environment(hyper_parameters, model_template, False, False, 100)
+        self.mc_cls_task = MMClassificationTask(mc_task_env)
+        self.mc_cls_label_schema = generate_label_schema(self.mc_cls_dataset.get_labels(), False, False)
+
+        ml_task_env, self.ml_cls_dataset = init_environment(hyper_parameters, model_template, True, False, 100)
+        self.ml_cls_task = MMClassificationTask(ml_task_env)
+        self.ml_cls_label_schema = generate_label_schema(self.ml_cls_dataset.get_labels(), False, False)
+
+        hl_task_env, self.hl_cls_dataset = init_environment(hyper_parameters, model_template, False, True, 100)
+        self.hl_cls_task = MMClassificationTask(hl_task_env)
+        self.hl_cls_label_schema = generate_label_schema(self.hl_cls_dataset.get_labels(), False, False)
+
+    @e2e_pytest_unit
+    def test_build_model(self, mocker) -> None:
+        """Test build_model function."""
+        _mock_recipe_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_CLS_TEMPLATE_DIR, "model.py"))
+        _mock_recipe_cfg.model.pop("task")
+        model = self.mc_cls_task.build_model(_mock_recipe_cfg, True)
+        assert isinstance(model, SAMImageClassifier)
+
+    @e2e_pytest_unit
+    def test_train_multiclass(self, mocker) -> None:
+        """Test train function."""
+
+        def _mock_train_model(*args, **kwargs):
+            with open(os.path.join(self.mc_cls_task._output_path, "latest.pth"), "wb") as f:
+                torch.save({"dummy": torch.randn(1, 3, 3, 3)}, f)
+
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataset",
+            return_value=MockDataset(self.mc_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataloader",
+            return_value=MockDataLoader(self.mc_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.train_model",
+            side_effect=_mock_train_model,
+        )
+        mocker.patch.object(MMClassificationTask, "build_model", return_value=MockModel())
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_data_parallel",
+            return_value=MockModel(),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.FeatureVectorHook",
+            return_value=nullcontext(),
+        )
+
+        _config = ModelConfiguration(ClassificationConfig("header"), self.mc_cls_label_schema)
+        output_model = ModelEntity(self.mc_cls_dataset, _config)
+        self.mc_cls_task.train(self.mc_cls_dataset, output_model)
+        assert output_model.performance is not None
+
+    @e2e_pytest_unit
+    def test_train_multilabel(self, mocker) -> None:
+        """Test train function."""
+
+        def _mock_train_model(*args, **kwargs):
+            with open(os.path.join(self.ml_cls_task._output_path, "latest.pth"), "wb") as f:
+                torch.save({"dummy": torch.randn(1, 3, 3, 3)}, f)
+
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataset",
+            return_value=MockDataset(self.ml_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataloader",
+            return_value=MockDataLoader(self.ml_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.train_model",
+            side_effect=_mock_train_model,
+        )
+        mocker.patch.object(MMClassificationTask, "build_model", return_value=MockModel())
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_data_parallel",
+            return_value=MockModel(),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.FeatureVectorHook",
+            return_value=nullcontext(),
+        )
+
+        _config = ModelConfiguration(ClassificationConfig("header"), self.ml_cls_label_schema)
+        output_model = ModelEntity(self.ml_cls_dataset, _config)
+        self.ml_cls_task.train(self.ml_cls_dataset, output_model)
+        assert output_model.performance is not None
+
+    @e2e_pytest_unit
+    def test_train_hierarchicallabel(self, mocker) -> None:
+        """Test train function."""
+
+        def _mock_train_model(*args, **kwargs):
+            with open(os.path.join(self.hl_cls_task._output_path, "latest.pth"), "wb") as f:
+                torch.save({"dummy": torch.randn(1, 3, 3, 3)}, f)
+
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataset",
+            return_value=MockDataset(self.hl_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataloader",
+            return_value=MockDataLoader(self.hl_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.train_model",
+            side_effect=_mock_train_model,
+        )
+        mocker.patch.object(MMClassificationTask, "build_model", return_value=MockModel())
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_data_parallel",
+            return_value=MockModel(),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.FeatureVectorHook",
+            return_value=nullcontext(),
+        )
+
+        _config = ModelConfiguration(ClassificationConfig("header"), self.hl_cls_label_schema)
+        output_model = ModelEntity(self.hl_cls_dataset, _config)
+        self.hl_cls_task.train(self.hl_cls_dataset, output_model)
+        assert output_model.performance is not None
+
+    @e2e_pytest_unit
+    def test_infer_multiclass(self, mocker) -> None:
+        """Test infer function."""
+
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataset",
+            return_value=MockDataset(self.mc_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataloader",
+            return_value=MockDataLoader(self.mc_cls_dataset),
+        )
+        mocker.patch.object(MMClassificationTask, "build_model", return_value=MockModel())
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_data_parallel",
+            return_value=MockModel(),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.FeatureVectorHook",
+            return_value=nullcontext(),
+        )
+
+        inference_parameters = InferenceParameters(is_evaluation=True)
+        outputs = self.mc_cls_task.infer(self.mc_cls_dataset.with_empty_annotations(), inference_parameters)
+        for output in outputs:
+            assert output.get_annotations()[-1].get_labels()[0].probability == 0.7
+
+    @e2e_pytest_unit
+    def test_infer_multilabel(self, mocker) -> None:
+        """Test infer function."""
+
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataset",
+            return_value=MockDataset(self.ml_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataloader",
+            return_value=MockDataLoader(self.ml_cls_dataset),
+        )
+        mocker.patch.object(MMClassificationTask, "build_model", return_value=MockModel())
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_data_parallel",
+            return_value=MockModel(),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.FeatureVectorHook",
+            return_value=nullcontext(),
+        )
+
+        inference_parameters = InferenceParameters(is_evaluation=True)
+        outputs = self.ml_cls_task.infer(self.ml_cls_dataset.with_empty_annotations(), inference_parameters)
+        for output in outputs:
+            assert output.get_annotations()[-1].get_labels()[0].probability == 0.7
+
+    @e2e_pytest_unit
+    def test_infer_hierarchicallabel(self, mocker) -> None:
+        """Test infer function."""
+
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataset",
+            return_value=MockDataset(self.hl_cls_dataset),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_dataloader",
+            return_value=MockDataLoader(self.hl_cls_dataset),
+        )
+        mocker.patch.object(MMClassificationTask, "build_model", return_value=MockModel())
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.build_data_parallel",
+            return_value=MockModel(),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.FeatureVectorHook",
+            return_value=nullcontext(),
+        )
+
+        inference_parameters = InferenceParameters(is_evaluation=True)
+        outputs = self.hl_cls_task.infer(self.hl_cls_dataset.with_empty_annotations(), inference_parameters)
+        for output in outputs:
+            assert output.get_annotations()[-1].get_labels()[0].probability == 0.7
+
+    @e2e_pytest_unit
+    def test_cls_evaluate(self) -> None:
+        """Test evaluate function for classification."""
+
+        _config = ModelConfiguration(ClassificationConfig("header"), self.mc_cls_label_schema)
+        _model = ModelEntity(self.mc_cls_dataset, _config)
+        resultset = ResultSetEntity(_model, self.mc_cls_dataset, self.mc_cls_dataset)
+        self.mc_cls_task.evaluate(resultset)
+        assert resultset.performance.score.value == 1.0
+
+    @e2e_pytest_unit
+    def test_cls_evaluate_with_empty_annotations(self) -> None:
+        """Test evaluate function for classification with empty predictions."""
+
+        _config = ModelConfiguration(ClassificationConfig("header"), self.mc_cls_label_schema)
+        _model = ModelEntity(self.mc_cls_dataset, _config)
+        resultset = ResultSetEntity(_model, self.mc_cls_dataset, self.mc_cls_dataset.with_empty_annotations())
+        self.mc_cls_task.evaluate(resultset)
+        assert resultset.performance.score.value == 0.0
+
+    @pytest.mark.parametrize("precision", [ModelPrecision.FP16, ModelPrecision.FP32])
+    @e2e_pytest_unit
+    def test_export(self, mocker, precision: ModelPrecision) -> None:
+        """Test export function.
+
+        1. Create model entity
+        2. Run export function
+        3. Check output model attributes
+        """
+        _config = ModelConfiguration(ClassificationConfig("header"), self.mc_cls_label_schema)
+        _model = ModelEntity(self.mc_cls_dataset, _config)
+
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.task.ClassificationExporter",
+            return_value=MockExporter(self.mc_cls_task),
+        )
+        mocker.patch(
+            "otx.algorithms.classification.task.embed_ir_model_data",
+            return_value=True,
+        )
+
+        self.mc_cls_task.export(ExportType.OPENVINO, _model, precision, False)
+
+        assert _model.model_format == ModelFormat.OPENVINO
+        assert _model.optimization_type == ModelOptimizationType.MO
+        assert _model.precision[0] == precision
+        assert _model.get_data("openvino.bin") is not None
+        assert _model.get_data("openvino.xml") is not None
+        assert _model.precision == self.mc_cls_task._precision
+        assert _model.optimization_methods == self.mc_cls_task._optimization_methods
+        assert _model.get_data("label_schema.json") is not None
+
+    @e2e_pytest_unit
+    def test_explain(self, mocker):
+        """Test explain function."""
+        explain_parameters = ExplainParameters(
+            explainer="ClassWiseSaliencyMap",
+            process_saliency_maps=False,
+            explain_predicted_classes=True,
+        )
+        outputs = self.mc_cls_task.explain(self.mc_cls_dataset, explain_parameters)
+        assert isinstance(outputs, DatasetEntity)
+        assert len(outputs) == 200
diff --git a/tests/unit/algorithms/detection/adapters/mmdet/test_task.py b/tests/unit/algorithms/detection/adapters/mmdet/test_task.py
index db35bda88aa..295e1c91834 100644
--- a/tests/unit/algorithms/detection/adapters/mmdet/test_task.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/test_task.py
@@ -133,8 +133,8 @@ def run(self, *args, **kwargs):
         }
 
 
-class TestMMActionTask:
-    """Test class for MMActionTask.
+class TestMMDetectionTask:
+    """Test class for MMDetectionTask.
 
     Details are explained in each test function.
     """