From b61c97677b29a90bbbc71288c01339e6704e4b01 Mon Sep 17 00:00:00 2001
From: Yunchu Lee
Date: Wed, 30 Aug 2023 17:17:47 +0900
Subject: [PATCH] Mergeback 1.4.2rc2 (#2462)

* Fix label list order for h-label classification (#2440)

* Fix label list for h-label cls

* Fix unit tests

* Modified fq numbers for lite HRNET (#2445)

modified fq numbers for lite HRNET

* Update PTQ ignored scope for hrnet 18 mod2 (#2449)

Update ptq ignored scope for hrnet 18 mod2

* Fix OpenVINO inference for legacy models (#2450)

* bug fix for legacy openvino models

* Add tests

* Specific exceptions

---------

* Update for 1.4.2rc2 (#2455)

update for release 1.4.2rc2

* Prevent zero-sized saliency map in tiling if tile size is too big (#2452)

* Prevent zero-sized saliency map in tiling if tile size is too big

* Prevent zero-sized saliency in tiling (PyTorch)

* Add unit tests for Tiler merge features methods

---------

Co-authored-by: Galina

* Update pot fq reference number (#2456)

update pot fq reference number to 15

* add missing log for the 1.4.2

* resolve precommit issue

---------

Co-authored-by: Galina Zalesskaya
Co-authored-by: Vladislav Sovrasov
Co-authored-by: Ashwin Vaidya
---
 CHANGELOG.md                                       | 12 ++--
 src/otx/algorithms/anomaly/tasks/openvino.py       | 42 ++++++++++--
 .../classification/adapters/openvino/task.py       | 19 ++++--
 src/otx/algorithms/classification/task.py          | 13 +++-
 .../classification/utils/__init__.py               |  2 +
 .../classification/utils/cls_utils.py              | 21 ++++++
 .../adapters/mmdet/datasets/tiling.py              |  2 +-
 .../ptq_optimization_config.py                     |  3 +-
 src/otx/cli/utils/io.py                            |  4 ++
 .../compressed_model.yml                           |  4 +-
 .../compressed_model.yml                           |  2 +-
 .../compressed_model.yml                           |  2 +-
 .../algorithms/anomaly/tasks/test_openvino.py      | 66 ++++++++++++++++++-
 .../test_classification_openvino_task.py           |  1 +
 .../tiling/test_tiling_tile_classifier.py          | 17 ++++-
 15 files changed, 186 insertions(+), 24 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cc41cc1c634..4337220e275 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,13 +19,17 @@ All notable changes to this project will be documented in this file.
 - Adapt timeout value of initialization for distributed training ()
 - Optimize data loading by merging load & resize operations w/ caching support for cls/det/iseg/sseg (, , )
 
-### Bug fixes
+## \[v1.4.2\]
 
-- Fix F1 auto-threshold to choose best largest confidence ()
+### Enhancements
 
-### Known issues
+- Add model category attributes to model template ()
 
-- OpenVINO(==2023.0) IR inference is not working well on 2-stage models (e.g. Mask-RCNN) exported from torch==1.13.1
+### Bug fixes
+
+- Add workaround for the incorrect meta info M-RCNN (used for XAI) ()
+- Fix label list order for h-label classification ()
+- Modified fq numbers for lite HRNET e2e tests ()
 
 ## \[v1.4.1\]
diff --git a/src/otx/algorithms/anomaly/tasks/openvino.py b/src/otx/algorithms/anomaly/tasks/openvino.py
index cc65ef74294..7859cfbfb36 100644
--- a/src/otx/algorithms/anomaly/tasks/openvino.py
+++ b/src/otx/algorithms/anomaly/tasks/openvino.py
@@ -26,6 +26,7 @@
 import numpy as np
 import openvino.runtime as ov
 from addict import Dict as ADDict
+from anomalib.data.utils.transform import get_transforms
 from anomalib.deploy import OpenVINOInferencer
 from nncf.common.quantization.structs import QuantizationPreset
 from omegaconf import OmegaConf
@@ -216,16 +217,47 @@ def get_metadata(self) -> Dict:
         """Get Meta Data."""
         metadata = {}
         if self.task_environment.model is not None:
-            metadata = json.loads(self.task_environment.model.get_data("metadata").decode())
-            metadata["image_threshold"] = np.array(metadata["image_threshold"], dtype=np.float32).item()
-            metadata["pixel_threshold"] = np.array(metadata["pixel_threshold"], dtype=np.float32).item()
-            metadata["min"] = np.array(metadata["min"], dtype=np.float32).item()
-            metadata["max"] = np.array(metadata["max"], dtype=np.float32).item()
+            try:
+                metadata = json.loads(self.task_environment.model.get_data("metadata").decode())
+                self._populate_metadata(metadata)
+                logger.info("Metadata loaded from model v1.4.")
+            except (KeyError, json.decoder.JSONDecodeError):
+                # model is from version 1.2.x
+                metadata = self._populate_metadata_legacy(self.task_environment.model)
+                logger.info("Metadata loaded from model v1.2.x.")
         else:
             raise ValueError("Cannot access meta-data. self.task_environment.model is empty.")
 
         return metadata
 
+    def _populate_metadata_legacy(self, model: ModelEntity) -> Dict[str, Any]:
+        """Populates metadata for models for version 1.2.x."""
+        image_threshold = np.frombuffer(model.get_data("image_threshold"), dtype=np.float32)
+        pixel_threshold = np.frombuffer(model.get_data("pixel_threshold"), dtype=np.float32)
+        min_value = np.frombuffer(model.get_data("min"), dtype=np.float32)
+        max_value = np.frombuffer(model.get_data("max"), dtype=np.float32)
+        transform = get_transforms(
+            config=self.config.dataset.transform_config.train,
+            image_size=tuple(self.config.dataset.image_size),
+            to_tensor=True,
+        )
+        metadata = {
+            "transform": transform.to_dict(),
+            "image_threshold": image_threshold,
+            "pixel_threshold": pixel_threshold,
+            "min": min_value,
+            "max": max_value,
+            "task": str(self.task_type).lower().split("_")[-1],
+        }
+        return metadata
+
+    def _populate_metadata(self, metadata: Dict[str, Any]):
+        """Populates metadata for models from version 1.4 onwards."""
+        metadata["image_threshold"] = np.array(metadata["image_threshold"], dtype=np.float32).item()
+        metadata["pixel_threshold"] = np.array(metadata["pixel_threshold"], dtype=np.float32).item()
+        metadata["min"] = np.array(metadata["min"], dtype=np.float32).item()
+        metadata["max"] = np.array(metadata["max"], dtype=np.float32).item()
+
     def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optional[str] = None):
         """Evaluate the performance of the model.
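Note: the fallback above distinguishes model versions purely by where the normalization statistics live. OTX 1.4 stores a single JSON `metadata` blob, while 1.2.x stored each value as a raw float32 buffer under its own key. A minimal sketch of the two layouts and how both decode to the same plain-float metadata (the threshold values are made up; only the key names come from the patch):

```python
import json

import numpy as np

# v1.2.x layout: each statistic is a separate raw float32 buffer stored under its own key.
legacy_blobs = {
    "image_threshold": np.float32(0.65).tobytes(),
    "pixel_threshold": np.float32(0.70).tobytes(),
    "min": np.float32(0.0).tobytes(),
    "max": np.float32(1.0).tobytes(),
}
legacy = {key: np.frombuffer(buf, dtype=np.float32).item() for key, buf in legacy_blobs.items()}

# v1.4 layout: one JSON document stored under the single "metadata" key.
metadata_blob = json.dumps({"image_threshold": 0.65, "pixel_threshold": 0.70, "min": 0.0, "max": 1.0}).encode()
current = {key: np.array(value, dtype=np.float32).item() for key, value in json.loads(metadata_blob.decode()).items()}

assert legacy == current  # both paths end up with the same plain Python floats
```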
diff --git a/src/otx/algorithms/classification/adapters/openvino/task.py b/src/otx/algorithms/classification/adapters/openvino/task.py
index 9d1ddd1ea30..66a4fa51c44 100644
--- a/src/otx/algorithms/classification/adapters/openvino/task.py
+++ b/src/otx/algorithms/classification/adapters/openvino/task.py
@@ -36,6 +36,7 @@
 from otx.algorithms.classification.configs import ClassificationConfig
 from otx.algorithms.classification.utils import (
     get_cls_deploy_config,
+    get_hierarchical_label_list,
 )
 from otx.algorithms.common.utils import OTXOpenVinoDataLoader
 from otx.algorithms.common.utils.ir import check_if_quantized
@@ -216,14 +217,18 @@ def add_prediction(id: int, predicted_scene: AnnotationSceneEntity, aux_data: tu
             if saliency_map.ndim > 1 and repr_vector.ndim > 0:
                 feature_vec_media = TensorEntity(name="representation_vector", numpy=repr_vector.reshape(-1))
                 dataset_item.append_metadata_item(feature_vec_media, model=self.model)
-                if saliency_map.ndim == 4 and saliency_map.shape[0] == 1:
-                    saliency_map = saliency_map.squeeze()
+                label_list = self.task_environment.get_labels()
+                # Fix the order for hierarchical labels to adjust classes with model outputs
+                if self.inferencer.model.hierarchical:
+                    label_list = get_hierarchical_label_list(
+                        self.inferencer.model.hierarchical_info["cls_heads_info"], label_list
+                    )
 
                 add_saliency_maps_to_dataset_item(
                     dataset_item=dataset_item,
                     saliency_map=saliency_map,
                     model=self.model,
-                    labels=self.task_environment.get_labels(),
+                    labels=label_list,
                     predicted_scored_labels=item_labels,
                     explain_predicted_classes=explain_predicted_classes,
                     process_saliency_maps=process_saliency_maps,
@@ -272,6 +277,12 @@ def explain(
         explain_predicted_classes = explain_parameters.explain_predicted_classes
 
         dataset_size = len(dataset)
+        label_list = self.task_environment.get_labels()
+        # Fix the order for hierarchical labels to adjust classes with model outputs
+        if self.inferencer.model.hierarchical:
+            label_list = get_hierarchical_label_list(
+                self.inferencer.model.hierarchical_info["cls_heads_info"], label_list
+            )
         for i, dataset_item in enumerate(dataset, 1):
             cls_result, predicted_scene = self.inferencer.predict(dataset_item.numpy)
@@ -292,7 +303,7 @@
                 dataset_item=dataset_item,
                 saliency_map=saliency_map,
                 model=self.model,
-                labels=self.task_environment.get_labels(),
+                labels=label_list,
                 predicted_scored_labels=item_labels,
                 explain_predicted_classes=explain_predicted_classes,
                 process_saliency_maps=process_saliency_maps,
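Note: the label-list fix matters because per-class saliency maps are attached by position, i.e. channel `i` of `saliency_map` is reported for `labels[i]`. When the project's label order differs from the model's logit order, every map lands on the wrong class. A toy illustration with plain strings standing in for `LabelEntity` objects (the pairing-by-position behaviour is the assumption being illustrated, not a call into the real helper):

```python
import numpy as np

# Per-class saliency maps in the model's output (logit) order: channel i explains class i.
model_output_order = ["cat", "dog", "red", "blue"]
saliency_map = np.stack([np.full((2, 2), i, dtype=np.uint8) for i in range(4)])

project_order = ["blue", "cat", "dog", "red"]  # e.g. what task_environment.get_labels() returns

# Pairing channels with the project order mislabels every map:
wrong = {label: saliency_map[i] for i, label in enumerate(project_order)}
assert wrong["blue"][0, 0] == 0  # this is really the "cat" channel

# Reordering the label list to the model's output order restores the correct pairing:
right = {label: saliency_map[i] for i, label in enumerate(model_output_order)}
assert right["cat"][0, 0] == 0
```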
diff --git a/src/otx/algorithms/classification/task.py b/src/otx/algorithms/classification/task.py
index 9f520c62cbb..03da102188f 100644
--- a/src/otx/algorithms/classification/task.py
+++ b/src/otx/algorithms/classification/task.py
@@ -28,6 +28,7 @@
     get_cls_deploy_config,
     get_cls_inferencer_configuration,
     get_cls_model_api_configuration,
+    get_hierarchical_label_list,
 )
 from otx.algorithms.classification.utils import (
     get_multihead_class_info as get_hierarchical_info,
 )
@@ -350,6 +351,10 @@ def _add_predictions_to_dataset(
         dataset_size = len(dataset)
         pos_thr = 0.5
+        label_list = self._labels
+        # Fix the order for hierarchical labels to adjust classes with model outputs
+        if self._hierarchical:
+            label_list = get_hierarchical_label_list(self._hierarchical_info, label_list)
         for i, (dataset_item, prediction_items) in enumerate(zip(dataset, prediction_results)):
             prediction_item, feature_vector, saliency_map = prediction_items
             if any(np.isnan(prediction_item)):
@@ -378,7 +383,7 @@
                     dataset_item=dataset_item,
                     saliency_map=saliency_map,
                     model=self._task_environment.model,
-                    labels=self._labels,
+                    labels=label_list,
                     predicted_scored_labels=item_labels,
                     explain_predicted_classes=explain_predicted_classes,
                     process_saliency_maps=process_saliency_maps,
@@ -440,13 +445,17 @@ def _add_explanations_to_dataset(
     ):
         """Loop over dataset again and assign saliency maps."""
         dataset_size = len(dataset)
+        label_list = self._labels
+        # Fix the order for hierarchical labels to adjust classes with model outputs
+        if self._hierarchical:
+            label_list = get_hierarchical_label_list(self._hierarchical_info, label_list)
         for i, (dataset_item, prediction_item, saliency_map) in enumerate(zip(dataset, predictions, saliency_maps)):
             item_labels = self._get_item_labels(prediction_item, pos_thr=0.5)
             add_saliency_maps_to_dataset_item(
                 dataset_item=dataset_item,
                 saliency_map=saliency_map,
                 model=self._task_environment.model,
-                labels=self._labels,
+                labels=label_list,
                 predicted_scored_labels=item_labels,
                 explain_predicted_classes=explain_predicted_classes,
                 process_saliency_maps=process_saliency_maps,
diff --git a/src/otx/algorithms/classification/utils/__init__.py b/src/otx/algorithms/classification/utils/__init__.py
index 533b871de17..536cd56bff7 100644
--- a/src/otx/algorithms/classification/utils/__init__.py
+++ b/src/otx/algorithms/classification/utils/__init__.py
@@ -8,10 +8,12 @@
     get_cls_deploy_config,
     get_cls_inferencer_configuration,
     get_cls_model_api_configuration,
+    get_hierarchical_label_list,
     get_multihead_class_info,
 )
 
 __all__ = [
+    "get_hierarchical_label_list",
     "get_multihead_class_info",
     "get_cls_inferencer_configuration",
     "get_cls_deploy_config",
diff --git a/src/otx/algorithms/classification/utils/cls_utils.py b/src/otx/algorithms/classification/utils/cls_utils.py
index 12216ba8319..88db1ed2ecb 100644
--- a/src/otx/algorithms/classification/utils/cls_utils.py
+++ b/src/otx/algorithms/classification/utils/cls_utils.py
@@ -117,3 +117,24 @@ def get_cls_model_api_configuration(label_schema: LabelSchemaEntity, inference_c
         mapi_config[("model_info", "hierarchical_config")] = json.dumps(hierarchical_config)
 
     return mapi_config
+
+
+def get_hierarchical_label_list(hierarchical_info, labels):
+    """Return hierarchical labels list which is adjusted to model outputs classes."""
+    hierarchical_labels = []
+    for head_idx in range(hierarchical_info["num_multiclass_heads"]):
+        logits_begin, logits_end = hierarchical_info["head_idx_to_logits_range"][str(head_idx)]
+        for logit in range(0, logits_end - logits_begin):
+            label_str = hierarchical_info["all_groups"][head_idx][logit]
+            label_idx = hierarchical_info["label_to_idx"][label_str]
+            hierarchical_labels.append(labels[label_idx])
+
+    if hierarchical_info["num_multilabel_classes"]:
+        logits_begin = hierarchical_info["num_single_label_classes"]
+        logits_end = len(labels)
+        for logit_idx, logit in enumerate(range(0, logits_end - logits_begin)):
+            label_str_idx = hierarchical_info["num_multiclass_heads"] + logit_idx
+            label_str = hierarchical_info["all_groups"][label_str_idx][0]
+            label_idx = hierarchical_info["label_to_idx"][label_str]
+            hierarchical_labels.append(labels[label_idx])
+    return hierarchical_labels
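Note: a self-contained trace of `get_hierarchical_label_list` with a hypothetical two-head hierarchy plus one multilabel class. Plain strings stand in for `LabelEntity` objects; the dictionary keys are exactly the ones the function reads above, while the label names and ranges are invented:

```python
from otx.algorithms.classification.utils import get_hierarchical_label_list

# Hypothetical hierarchy: head 0 predicts cat/dog, head 1 predicts red/blue,
# and "striped" is a single multilabel class whose logit follows the multiclass heads.
hierarchical_info = {
    "num_multiclass_heads": 2,
    "num_multilabel_classes": 1,
    "num_single_label_classes": 4,
    "head_idx_to_logits_range": {"0": (0, 2), "1": (2, 4)},
    "all_groups": [["cat", "dog"], ["red", "blue"], ["striped"]],
    "label_to_idx": {"blue": 0, "cat": 1, "dog": 2, "red": 3, "striped": 4},
}
labels = ["blue", "cat", "dog", "red", "striped"]  # project order, e.g. alphabetical

# The helper returns the same labels rearranged into the model's logit order:
print(get_hierarchical_label_list(hierarchical_info, labels))
# ['cat', 'dog', 'red', 'blue', 'striped']
```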
diff --git a/src/otx/algorithms/detection/adapters/mmdet/datasets/tiling.py b/src/otx/algorithms/detection/adapters/mmdet/datasets/tiling.py
index b02a548ca5f..79e0bfd5b53 100644
--- a/src/otx/algorithms/detection/adapters/mmdet/datasets/tiling.py
+++ b/src/otx/algorithms/detection/adapters/mmdet/datasets/tiling.py
@@ -538,8 +538,8 @@ def merge_maps(self, saliency_maps: Union[List[List[np.ndarray]], List[np.ndarra
         for orig_image in self.cached_results:
             img_idx = orig_image["index"]
-            ratios[img_idx] = np.array([feat_h, feat_w]) / self.tile_size
             image_h, image_w = orig_image["height"], orig_image["width"]
+            ratios[img_idx] = np.array([feat_h / min(self.tile_size, image_h), feat_w / min(self.tile_size, image_w)])
 
             image_map_h = int(image_h * ratios[img_idx][0])
             image_map_w = int(image_w * ratios[img_idx][1])
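Note: this one-line change is what prevents the zero-sized saliency maps mentioned in the commit message. With the old ratio the merged map size scales by `feat / tile_size`, which truncates to zero as soon as an image is much smaller than a tile; clamping the denominator to the image size keeps at least the per-tile feature resolution. A small numeric check (tile size, image size and feature size are made-up values):

```python
import numpy as np

feat_h, feat_w = 2, 2          # per-tile saliency map resolution
tile_size = 960                # tile larger than the whole image
image_h, image_w = 200, 256

# Old ratio: divide by the tile size regardless of the image size.
old_ratio = np.array([feat_h, feat_w]) / tile_size
print(int(image_h * old_ratio[0]), int(image_w * old_ratio[1]))  # 0 0 -> zero-sized merged map

# New ratio: clamp the denominator to the image size.
new_ratio = np.array([feat_h / min(tile_size, image_h), feat_w / min(tile_size, image_w)])
print(int(image_h * new_ratio[0]), int(image_w * new_ratio[1]))  # 2 2
```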
diff --git a/src/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/ptq_optimization_config.py b/src/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/ptq_optimization_config.py
index 1a2f9a8e589..4e5ce69c89c 100644
--- a/src/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/ptq_optimization_config.py
+++ b/src/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/ptq_optimization_config.py
@@ -23,6 +23,7 @@
 preset = QuantizationPreset.MIXED
 
 ignored_scope = IgnoredScope(
+    patterns=["/backbone/*"],
     names=[
         "/backbone/stage0/stage0.0/layers/layers.0/cross_resolution_weighting/Mul",
         "/backbone/stage0/stage0.0/layers/layers.0/cross_resolution_weighting/Mul_1",
@@ -102,5 +103,5 @@
         "/aggregator/Add_1",
         "/aggregator/Add_2",
         "/backbone/stage2/stage2.1/Add",
-    ]
+    ],
 )
diff --git a/src/otx/cli/utils/io.py b/src/otx/cli/utils/io.py
index 73941eb1a6c..3770fb279bf 100644
--- a/src/otx/cli/utils/io.py
+++ b/src/otx/cli/utils/io.py
@@ -51,6 +51,10 @@
     "visual_prompting_image_encoder.bin",
     "visual_prompting_decoder.xml",
     "visual_prompting_decoder.bin",
+    "image_threshold",  # NOTE: used for compatibility with OTX 1.2.x. Remove when all Geti projects are upgraded.
+    "pixel_threshold",  # NOTE: used for compatibility with OTX 1.2.x. Remove when all Geti projects are upgraded.
+    "min",  # NOTE: used for compatibility with OTX 1.2.x. Remove when all Geti projects are upgraded.
+    "max",  # NOTE: used for compatibility with OTX 1.2.x. Remove when all Geti projects are upgraded.
 )
diff --git a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR/compressed_model.yml b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR/compressed_model.yml
index fc0c3d64aac..aa6e9acdd15 100644
--- a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR/compressed_model.yml
+++ b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR/compressed_model.yml
@@ -1,5 +1,5 @@
 TestToolsMPASegmentation:
   nncf:
     number_of_fakequantizers: 586
-  ptq:
-    number_of_fakequantizers: 494
+  pot:
+    number_of_fakequantizers: 15
diff --git a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR/compressed_model.yml b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR/compressed_model.yml
index 1ffae1d8e0c..99a7b525c57 100644
--- a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR/compressed_model.yml
+++ b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR/compressed_model.yml
@@ -1,5 +1,5 @@
 TestToolsMPASegmentation:
   nncf:
     number_of_fakequantizers: 436
-  ptq:
+  pot:
     number_of_fakequantizers: 368
diff --git a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml
index a95149f79ea..f3d5c0f32b2 100644
--- a/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml
+++ b/tests/e2e/cli/semantic_segmentation/reference/Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR/compressed_model.yml
@@ -1,5 +1,5 @@
 TestToolsMPASegmentation:
   nncf:
     number_of_fakequantizers: 1138
-  ptq:
+  pot:
     number_of_fakequantizers: 942
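Note: the `IgnoredScope` above and the fake-quantizer counts in the reference files only make sense in the context of NNCF post-training quantization, where every operation left inside the quantized scope gets FakeQuantize nodes. A rough sketch of how such a scope is typically passed to `nncf.quantize`; the IR path, calibration data and the shortened name list are placeholders, not the project's actual optimization pipeline:

```python
import nncf
import numpy as np
import openvino.runtime as ov

core = ov.Core()
model = core.read_model("ocr_lite_hrnet_18_mod2.xml")  # hypothetical IR path

# Placeholder calibration data: a handful of random images in the model's input layout.
calibration_items = [np.random.rand(1, 3, 512, 512).astype(np.float32) for _ in range(8)]
calibration_dataset = nncf.Dataset(calibration_items, lambda item: item)

quantized_model = nncf.quantize(
    model,
    calibration_dataset,
    preset=nncf.QuantizationPreset.MIXED,
    ignored_scope=nncf.IgnoredScope(
        patterns=["/backbone/*"],                        # wildcard pattern added by this patch
        names=["/aggregator/Add", "/aggregator/Add_1"],  # explicit node names (abridged)
    ),
)
```

The broader the ignored scope, the fewer FakeQuantize nodes end up in the compressed model, which is what the updated reference counts above track.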
diff --git a/tests/unit/algorithms/anomaly/tasks/test_openvino.py b/tests/unit/algorithms/anomaly/tasks/test_openvino.py
index 58b6b2a8450..82d1174bb97 100644
--- a/tests/unit/algorithms/anomaly/tasks/test_openvino.py
+++ b/tests/unit/algorithms/anomaly/tasks/test_openvino.py
@@ -3,27 +3,40 @@
 # Copyright (C) 2021-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-import pytest
+import json
 from copy import deepcopy
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from unittest.mock import MagicMock, patch
 
 import numpy as np
+import pytest
 from otx.algorithms.anomaly.tasks.openvino import OpenVINOTask
 from otx.algorithms.anomaly.tasks.train import TrainingTask
 from otx.api.entities.datasets import DatasetEntity
 from otx.api.entities.inference_parameters import InferenceParameters
+from otx.api.entities.label import Domain, LabelEntity
+from otx.api.entities.label_schema import LabelSchemaEntity
 from otx.api.entities.model import ModelEntity, ModelOptimizationType
 from otx.api.entities.model_template import TaskType
 from otx.api.entities.optimization_parameters import OptimizationParameters
 from otx.api.entities.resultset import ResultSetEntity
 from otx.api.entities.subset import Subset
+from otx.api.entities.task_environment import TaskEnvironment
 from otx.api.usecases.tasks.interfaces.export_interface import ExportType
 from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType
+from otx.cli.utils.io import read_model
 
 
 class TestOpenVINOTask:
     """Tests methods in the OpenVINO task."""
 
+    @pytest.fixture
+    def tmp_dir(self):
+        with TemporaryDirectory() as tmp_dir:
+            yield tmp_dir
+
     def set_normalization_params(self, output_model: ModelEntity):
         """Sets normalization parameters for an untrained output model.
@@ -77,3 +90,54 @@ def test_openvino(self, tmpdir, setup_task_environment):
         # deploy
         openvino_task.deploy(output_model)
         assert output_model.exportable_code is not None
+
+    @patch.multiple(OpenVINOTask, get_config=MagicMock(), load_inferencer=MagicMock())
+    @patch("otx.algorithms.anomaly.tasks.openvino.get_transforms", MagicMock())
+    def test_anomaly_legacy_keys(self, mocker, tmp_dir):
+        """Checks whether the model is loaded correctly with legacy and current keys."""
+
+        tmp_dir = Path(tmp_dir)
+        xml_model_path = tmp_dir / "model.xml"
+        xml_model_path.write_text("xml_model")
+        bin_model_path = tmp_dir / "model.bin"
+        bin_model_path.write_text("bin_model")
+
+        # Test loading legacy keys
+        legacy_keys = ("image_threshold", "pixel_threshold", "min", "max")
+        for key in legacy_keys:
+            (tmp_dir / key).write_bytes(np.zeros(1, dtype=np.float32).tobytes())
+
+        model = read_model(mocker.MagicMock(), str(xml_model_path), mocker.MagicMock())
+        task_environment = TaskEnvironment(
+            model_template=mocker.MagicMock(),
+            model=model,
+            hyper_parameters=mocker.MagicMock(),
+            label_schema=LabelSchemaEntity.from_labels(
+                [
+                    LabelEntity("Anomalous", is_anomalous=True, domain=Domain.ANOMALY_SEGMENTATION),
+                    LabelEntity("Normal", domain=Domain.ANOMALY_SEGMENTATION),
+                ]
+            ),
+        )
+        openvino_task = OpenVINOTask(task_environment)
+        metadata = openvino_task.get_metadata()
+        for key in legacy_keys:
+            assert metadata[key] == np.zeros(1, dtype=np.float32)
+
+        # cleanup legacy keys
+        for key in legacy_keys:
+            (tmp_dir / key).unlink()
+
+        # Test loading new keys
+        new_metadata = {
+            "image_threshold": np.zeros(1, dtype=np.float32).tolist(),
+            "pixel_threshold": np.zeros(1, dtype=np.float32).tolist(),
+            "min": np.zeros(1, dtype=np.float32).tolist(),
+            "max": np.zeros(1, dtype=np.float32).tolist(),
+        }
+        (tmp_dir / "metadata").write_bytes(json.dumps(new_metadata).encode())
+        task_environment.model = read_model(mocker.MagicMock(), str(xml_model_path), mocker.MagicMock())
+        openvino_task = OpenVINOTask(task_environment)
+        metadata = openvino_task.get_metadata()
+        for key in new_metadata.keys():
+            assert metadata[key] == np.zeros(1, dtype=np.float32)
diff --git a/tests/unit/algorithms/classification/tasks/test_classification_openvino_task.py b/tests/unit/algorithms/classification/tasks/test_classification_openvino_task.py
index b0b71eb01cf..d2dc71560b2 100644
--- a/tests/unit/algorithms/classification/tasks/test_classification_openvino_task.py
+++ b/tests/unit/algorithms/classification/tasks/test_classification_openvino_task.py
@@ -143,6 +143,7 @@ def test_explain(self, mocker):
                 self.fake_ann_scene,
             ),
         )
+        self.cls_ov_task.inferencer.model.hierarchical = False
         updpated_dataset = self.cls_ov_task.explain(self.dataset)
         assert updpated_dataset is not None
diff --git a/tests/unit/algorithms/detection/tiling/test_tiling_tile_classifier.py b/tests/unit/algorithms/detection/tiling/test_tiling_tile_classifier.py
index 867dc4b1a79..b6f3ac8b955 100644
--- a/tests/unit/algorithms/detection/tiling/test_tiling_tile_classifier.py
+++ b/tests/unit/algorithms/detection/tiling/test_tiling_tile_classifier.py
@@ -81,9 +81,18 @@ def test_openvino_sync(self, mocker):
         mocked_model.return_value = mocker.MagicMock(spec=MaskRCNNModel, model_adapter=adapter_mock)
         params = DetectionConfig(header=self.hyper_parameters.header)
         ov_mask_inferencer = OpenVINOMaskInferencer(params, self.label_schema, "")
-        ov_mask_inferencer.model = mocked_model
+        original_shape = (self.dataset[0].media.width, self.dataset[0].media.height, 3)
         ov_mask_inferencer.model.resize_mask = False
-        ov_mask_inferencer.model.preprocess.return_value = ({"foo": "bar"}, {"baz": "qux"})
+        ov_mask_inferencer.model.preprocess.return_value = (
+            {"foo": "bar"},
+            {"baz": "qux", "original_shape": original_shape},
+        )
+        ov_mask_inferencer.model.postprocess.return_value = (
+            np.array([], dtype=np.float32),
+            np.array([], dtype=np.uint32),
+            np.zeros((0, 4), dtype=np.float32),
+            [],
+        )
         ov_inferencer = OpenVINOTileClassifierWrapper(
             ov_mask_inferencer, tile_classifier_model_file="", tile_classifier_weight_file="", mode="sync"
         )
@@ -99,6 +108,10 @@
                 [], [np.zeros((0, 4), dtype=np.float32)], np.zeros((0, 4), dtype=np.float32)
             ),
         )
+        ov_inferencer.tiler.model.infer_sync.return_value = {
+            "feature_vector": np.zeros((1, 5), dtype=np.float32),
+            "saliency_map": np.zeros((1, 1, 2, 2), dtype=np.float32),
+        }
         mocker.patch.object(OpenVINODetectionTask, "load_inferencer", return_value=ov_inferencer)
         ov_task = OpenVINODetectionTask(self.task_env)
         updated_dataset = ov_task.infer(self.dataset)