Mergeback 1.4.2rc2 #2464

Merged: 9 commits, Sep 4, 2023
42 changes: 37 additions & 5 deletions src/otx/algorithms/anomaly/tasks/openvino.py
@@ -26,6 +26,7 @@
import numpy as np
import openvino.runtime as ov
from addict import Dict as ADDict
from anomalib.data.utils.transform import get_transforms
from anomalib.deploy import OpenVINOInferencer
from nncf.common.quantization.structs import QuantizationPreset
from omegaconf import OmegaConf
@@ -216,16 +217,47 @@ def get_metadata(self) -> Dict:
"""Get Meta Data."""
metadata = {}
if self.task_environment.model is not None:
metadata = json.loads(self.task_environment.model.get_data("metadata").decode())
metadata["image_threshold"] = np.array(metadata["image_threshold"], dtype=np.float32).item()
metadata["pixel_threshold"] = np.array(metadata["pixel_threshold"], dtype=np.float32).item()
metadata["min"] = np.array(metadata["min"], dtype=np.float32).item()
metadata["max"] = np.array(metadata["max"], dtype=np.float32).item()
try:
metadata = json.loads(self.task_environment.model.get_data("metadata").decode())
self._populate_metadata(metadata)
logger.info("Metadata loaded from model v1.4.")
except (KeyError, json.decoder.JSONDecodeError):
# model is from version 1.2.x
metadata = self._populate_metadata_legacy(self.task_environment.model)
logger.info("Metadata loaded from model v1.2.x.")
else:
raise ValueError("Cannot access meta-data. self.task_environment.model is empty.")

return metadata

def _populate_metadata_legacy(self, model: ModelEntity) -> Dict[str, Any]:
"""Populates metadata for models for version 1.2.x."""
image_threshold = np.frombuffer(model.get_data("image_threshold"), dtype=np.float32)
pixel_threshold = np.frombuffer(model.get_data("pixel_threshold"), dtype=np.float32)
min_value = np.frombuffer(model.get_data("min"), dtype=np.float32)
max_value = np.frombuffer(model.get_data("max"), dtype=np.float32)
transform = get_transforms(
config=self.config.dataset.transform_config.train,
image_size=tuple(self.config.dataset.image_size),
to_tensor=True,
)
metadata = {
"transform": transform.to_dict(),
"image_threshold": image_threshold,
"pixel_threshold": pixel_threshold,
"min": min_value,
"max": max_value,
"task": str(self.task_type).lower().split("_")[-1],
}
return metadata

def _populate_metadata(self, metadata: Dict[str, Any]):
"""Populates metadata for models from version 1.4 onwards."""
metadata["image_threshold"] = np.array(metadata["image_threshold"], dtype=np.float32).item()
metadata["pixel_threshold"] = np.array(metadata["pixel_threshold"], dtype=np.float32).item()
metadata["min"] = np.array(metadata["min"], dtype=np.float32).item()
metadata["max"] = np.array(metadata["max"], dtype=np.float32).item()

def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optional[str] = None):
"""Evaluate the performance of the model.

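Reviewer note: the compatibility shim above handles two serialization layouts. A minimal standalone sketch of the difference (the values are made up; only standard json/numpy behaviour is assumed):

import json

import numpy as np

# OTX 1.2.x stored each statistic as a raw float32 buffer under its own model key,
# which is why the legacy path reads them back with np.frombuffer:
legacy_blob = np.array([0.5], dtype=np.float32).tobytes()
image_threshold = np.frombuffer(legacy_blob, dtype=np.float32)  # array([0.5], dtype=float32)

# OTX 1.4 stores a single JSON document under one "metadata" key instead:
new_blob = json.dumps({"image_threshold": 0.5, "pixel_threshold": 0.5, "min": 0.0, "max": 1.0}).encode()
metadata = json.loads(new_blob.decode())

# Asking a 1.2.x model for the "metadata" key fails, which is exactly what the
# try/except in get_metadata() catches before falling back to the legacy reader.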
19 changes: 15 additions & 4 deletions src/otx/algorithms/classification/adapters/openvino/task.py
@@ -36,6 +36,7 @@
from otx.algorithms.classification.configs import ClassificationConfig
from otx.algorithms.classification.utils import (
get_cls_deploy_config,
get_hierarchical_label_list,
)
from otx.algorithms.common.utils import OTXOpenVinoDataLoader
from otx.algorithms.common.utils.ir import check_if_quantized
@@ -216,14 +217,18 @@ def add_prediction(id: int, predicted_scene: AnnotationSceneEntity, aux_data: tu
if saliency_map.ndim > 1 and repr_vector.ndim > 0:
feature_vec_media = TensorEntity(name="representation_vector", numpy=repr_vector.reshape(-1))
dataset_item.append_metadata_item(feature_vec_media, model=self.model)
if saliency_map.ndim == 4 and saliency_map.shape[0] == 1:
saliency_map = saliency_map.squeeze()
label_list = self.task_environment.get_labels()
# Reorder hierarchical labels so the class order matches the model outputs
if self.inferencer.model.hierarchical:
label_list = get_hierarchical_label_list(
self.inferencer.model.hierarchical_info["cls_heads_info"], label_list
)

add_saliency_maps_to_dataset_item(
dataset_item=dataset_item,
saliency_map=saliency_map,
model=self.model,
labels=self.task_environment.get_labels(),
labels=label_list,
predicted_scored_labels=item_labels,
explain_predicted_classes=explain_predicted_classes,
process_saliency_maps=process_saliency_maps,
@@ -272,6 +277,12 @@ def explain(
explain_predicted_classes = explain_parameters.explain_predicted_classes

dataset_size = len(dataset)
label_list = self.task_environment.get_labels()
# Reorder hierarchical labels so the class order matches the model outputs
if self.inferencer.model.hierarchical:
label_list = get_hierarchical_label_list(
self.inferencer.model.hierarchical_info["cls_heads_info"], label_list
)
for i, dataset_item in enumerate(dataset, 1):
cls_result, predicted_scene = self.inferencer.predict(dataset_item.numpy)

@@ -292,7 +303,7 @@
dataset_item=dataset_item,
saliency_map=saliency_map,
model=self.model,
labels=self.task_environment.get_labels(),
labels=label_list,
predicted_scored_labels=item_labels,
explain_predicted_classes=explain_predicted_classes,
process_saliency_maps=process_saliency_maps,
13 changes: 11 additions & 2 deletions src/otx/algorithms/classification/task.py
@@ -28,6 +28,7 @@
get_cls_deploy_config,
get_cls_inferencer_configuration,
get_cls_model_api_configuration,
get_hierarchical_label_list,
)
from otx.algorithms.classification.utils import (
get_multihead_class_info as get_hierarchical_info,
@@ -350,6 +351,10 @@ def _add_predictions_to_dataset(

dataset_size = len(dataset)
pos_thr = 0.5
label_list = self._labels
# Reorder hierarchical labels so the class order matches the model outputs
if self._hierarchical:
label_list = get_hierarchical_label_list(self._hierarchical_info, label_list)
for i, (dataset_item, prediction_items) in enumerate(zip(dataset, prediction_results)):
prediction_item, feature_vector, saliency_map = prediction_items
if any(np.isnan(prediction_item)):
@@ -378,7 +383,7 @@
dataset_item=dataset_item,
saliency_map=saliency_map,
model=self._task_environment.model,
labels=self._labels,
labels=label_list,
predicted_scored_labels=item_labels,
explain_predicted_classes=explain_predicted_classes,
process_saliency_maps=process_saliency_maps,
@@ -440,13 +445,17 @@ def _add_explanations_to_dataset(
):
"""Loop over dataset again and assign saliency maps."""
dataset_size = len(dataset)
label_list = self._labels
# Reorder hierarchical labels so the class order matches the model outputs
if self._hierarchical:
label_list = get_hierarchical_label_list(self._hierarchical_info, label_list)
for i, (dataset_item, prediction_item, saliency_map) in enumerate(zip(dataset, predictions, saliency_maps)):
item_labels = self._get_item_labels(prediction_item, pos_thr=0.5)
add_saliency_maps_to_dataset_item(
dataset_item=dataset_item,
saliency_map=saliency_map,
model=self._task_environment.model,
labels=self._labels,
labels=label_list,
predicted_scored_labels=item_labels,
explain_predicted_classes=explain_predicted_classes,
process_saliency_maps=process_saliency_maps,
2 changes: 2 additions & 0 deletions src/otx/algorithms/classification/utils/__init__.py
@@ -8,10 +8,12 @@
get_cls_deploy_config,
get_cls_inferencer_configuration,
get_cls_model_api_configuration,
get_hierarchical_label_list,
get_multihead_class_info,
)

__all__ = [
"get_hierarchical_label_list",
"get_multihead_class_info",
"get_cls_inferencer_configuration",
"get_cls_deploy_config",
21 changes: 21 additions & 0 deletions src/otx/algorithms/classification/utils/cls_utils.py
@@ -117,3 +117,24 @@ def get_cls_model_api_configuration(label_schema: LabelSchemaEntity, inference_c

mapi_config[("model_info", "hierarchical_config")] = json.dumps(hierarchical_config)
return mapi_config


def get_hierarchical_label_list(hierarchical_info, labels):
"""Return hierarchical labels list which is adjusted to model outputs classes."""
hierarchical_labels = []
for head_idx in range(hierarchical_info["num_multiclass_heads"]):
logits_begin, logits_end = hierarchical_info["head_idx_to_logits_range"][str(head_idx)]
for logit in range(0, logits_end - logits_begin):
label_str = hierarchical_info["all_groups"][head_idx][logit]
label_idx = hierarchical_info["label_to_idx"][label_str]
hierarchical_labels.append(labels[label_idx])

if hierarchical_info["num_multilabel_classes"]:
logits_begin = hierarchical_info["num_single_label_classes"]
logits_end = len(labels)
for logit_idx, logit in enumerate(range(0, logits_end - logits_begin)):
label_str_idx = hierarchical_info["num_multiclass_heads"] + logit_idx
label_str = hierarchical_info["all_groups"][label_str_idx][0]
label_idx = hierarchical_info["label_to_idx"][label_str]
hierarchical_labels.append(labels[label_idx])
return hierarchical_labels
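A quick usage sketch of the new helper (the hierarchical_info dict and label names below are invented for illustration; in real use they come from the model's cls_heads_info):

from otx.algorithms.classification.utils import get_hierarchical_label_list

# Toy setup: head 0 predicts {cat, dog}, head 1 predicts {red, blue}, plus one
# multilabel class "striped". The label schema lists labels in a different order
# than the model's logits, which is the mismatch this helper fixes.
hierarchical_info = {
    "num_multiclass_heads": 2,
    "head_idx_to_logits_range": {"0": (0, 2), "1": (2, 4)},
    "all_groups": [["cat", "dog"], ["red", "blue"], ["striped"]],
    "label_to_idx": {"dog": 0, "cat": 1, "blue": 2, "red": 3, "striped": 4},
    "num_multilabel_classes": 1,
    "num_single_label_classes": 4,
}
labels = ["dog", "cat", "blue", "red", "striped"]  # schema order
print(get_hierarchical_label_list(hierarchical_info, labels))
# -> ['cat', 'dog', 'red', 'blue', 'striped']  (logit order)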
@@ -538,8 +538,8 @@ def merge_maps(self, saliency_maps: Union[List[List[np.ndarray]], List[np.ndarra

for orig_image in self.cached_results:
img_idx = orig_image["index"]
ratios[img_idx] = np.array([feat_h, feat_w]) / self.tile_size
image_h, image_w = orig_image["height"], orig_image["width"]
ratios[img_idx] = np.array([feat_h / min(self.tile_size, image_h), feat_w / min(self.tile_size, image_w)])

image_map_h = int(image_h * ratios[img_idx][0])
image_map_w = int(image_w * ratios[img_idx][1])
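Why the one-line change above matters: when the input image is smaller than the tile size, the whole image fits in a single tile, so the saliency map covers the image rather than a full tile. A numeric check with assumed sizes (32x32 feature map, 400 px tile):

import numpy as np

feat_h = feat_w = 32           # assumed saliency-map resolution per tile
tile_size = 400
image_h, image_w = 200, 600    # image is smaller than the tile in one dimension

old_ratio = np.array([feat_h, feat_w]) / tile_size                # [0.08, 0.08]
new_ratio = np.array([feat_h / min(tile_size, image_h),
                      feat_w / min(tile_size, image_w)])          # [0.16, 0.08]

print(int(image_h * old_ratio[0]), int(image_w * old_ratio[1]))   # 16 48: map under-scaled
print(int(image_h * new_ratio[0]), int(image_w * new_ratio[1]))   # 32 48: full resolution kept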
@@ -23,6 +23,7 @@
preset = QuantizationPreset.MIXED

ignored_scope = IgnoredScope(
patterns=["/backbone/*"],
names=[
"/backbone/stage0/stage0.0/layers/layers.0/cross_resolution_weighting/Mul",
"/backbone/stage0/stage0.0/layers/layers.0/cross_resolution_weighting/Mul_1",
@@ -102,5 +103,5 @@
"/aggregator/Add_1",
"/aggregator/Add_2",
"/backbone/stage2/stage2.1/Add",
]
],
)
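For context on the patterns addition: NNCF's IgnoredScope can exclude nodes by exact name or by regular expression, so "/backbone/*" keeps every backbone node out of quantization instead of listing each one (which is also why the expected fake-quantizer count drops sharply in the regression file further down). A hedged sketch, with model and calibration_dataset as placeholders:

from nncf import IgnoredScope, quantize

ignored_scope = IgnoredScope(
    patterns=["/backbone/*"],      # regex pattern: skip everything under the backbone
    names=["/aggregator/Add_1"],   # exact node names can still be combined with patterns
)
# quantized_model = quantize(model, calibration_dataset, ignored_scope=ignored_scope)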
4 changes: 4 additions & 0 deletions src/otx/cli/utils/io.py
@@ -51,6 +51,10 @@
"visual_prompting_image_encoder.bin",
"visual_prompting_decoder.xml",
"visual_prompting_decoder.bin",
"image_threshold", # NOTE: used for compatibility with with OTX 1.2.x. Remove when all Geti projects are upgraded.
"pixel_threshold", # NOTE: used for compatibility with with OTX 1.2.x. Remove when all Geti projects are upgraded.
"min", # NOTE: used for compatibility with with OTX 1.2.x. Remove when all Geti projects are upgraded.
"max", # NOTE: used for compatibility with with OTX 1.2.x. Remove when all Geti projects are upgraded.
)


@@ -2,4 +2,4 @@ TestToolsMPASegmentation:
nncf:
number_of_fakequantizers: 586
ptq:
number_of_fakequantizers: 494
number_of_fakequantizers: 15
66 changes: 65 additions & 1 deletion tests/unit/algorithms/anomaly/tasks/test_openvino.py
@@ -3,27 +3,40 @@
# Copyright (C) 2021-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
import json
from copy import deepcopy
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import MagicMock, patch

import numpy as np
import pytest

from otx.algorithms.anomaly.tasks.openvino import OpenVINOTask
from otx.algorithms.anomaly.tasks.train import TrainingTask
from otx.api.entities.datasets import DatasetEntity
from otx.api.entities.inference_parameters import InferenceParameters
from otx.api.entities.label import Domain, LabelEntity
from otx.api.entities.label_schema import LabelSchemaEntity
from otx.api.entities.model import ModelEntity, ModelOptimizationType
from otx.api.entities.model_template import TaskType
from otx.api.entities.optimization_parameters import OptimizationParameters
from otx.api.entities.resultset import ResultSetEntity
from otx.api.entities.subset import Subset
from otx.api.entities.task_environment import TaskEnvironment
from otx.api.usecases.tasks.interfaces.export_interface import ExportType
from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType
from otx.cli.utils.io import read_model


class TestOpenVINOTask:
"""Tests methods in the OpenVINO task."""

@pytest.fixture
def tmp_dir(self):
with TemporaryDirectory() as tmp_dir:
yield tmp_dir

def set_normalization_params(self, output_model: ModelEntity):
"""Sets normalization parameters for an untrained output model.

@@ -77,3 +90,54 @@ def test_openvino(self, tmpdir, setup_task_environment):
# deploy
openvino_task.deploy(output_model)
assert output_model.exportable_code is not None

@patch.multiple(OpenVINOTask, get_config=MagicMock(), load_inferencer=MagicMock())
@patch("otx.algorithms.anomaly.tasks.openvino.get_transforms", MagicMock())
def test_anomaly_legacy_keys(self, mocker, tmp_dir):
"""Checks whether the model is loaded correctly with legacy and current keys."""

tmp_dir = Path(tmp_dir)
xml_model_path = tmp_dir / "model.xml"
xml_model_path.write_text("xml_model")
bin_model_path = tmp_dir / "model.bin"
bin_model_path.write_text("bin_model")

# Test loading legacy keys
legacy_keys = ("image_threshold", "pixel_threshold", "min", "max")
for key in legacy_keys:
(tmp_dir / key).write_bytes(np.zeros(1, dtype=np.float32).tobytes())

model = read_model(mocker.MagicMock(), str(xml_model_path), mocker.MagicMock())
task_environment = TaskEnvironment(
model_template=mocker.MagicMock(),
model=model,
hyper_parameters=mocker.MagicMock(),
label_schema=LabelSchemaEntity.from_labels(
[
LabelEntity("Anomalous", is_anomalous=True, domain=Domain.ANOMALY_SEGMENTATION),
LabelEntity("Normal", domain=Domain.ANOMALY_SEGMENTATION),
]
),
)
openvino_task = OpenVINOTask(task_environment)
metadata = openvino_task.get_metadata()
for key in legacy_keys:
assert metadata[key] == np.zeros(1, dtype=np.float32)

# cleanup legacy keys
for key in legacy_keys:
(tmp_dir / key).unlink()

# Test loading new keys
new_metadata = {
"image_threshold": np.zeros(1, dtype=np.float32).tolist(),
"pixel_threshold": np.zeros(1, dtype=np.float32).tolist(),
"min": np.zeros(1, dtype=np.float32).tolist(),
"max": np.zeros(1, dtype=np.float32).tolist(),
}
(tmp_dir / "metadata").write_bytes(json.dumps(new_metadata).encode())
task_environment.model = read_model(mocker.MagicMock(), str(xml_model_path), mocker.MagicMock())
openvino_task = OpenVINOTask(task_environment)
metadata = openvino_task.get_metadata()
for key in new_metadata.keys():
assert metadata[key] == np.zeros(1, dtype=np.float32)
@@ -143,6 +143,7 @@ def test_explain(self, mocker):
self.fake_ann_scene,
),
)
self.cls_ov_task.inferencer.model.hierarchical = False
updated_dataset = self.cls_ov_task.explain(self.dataset)

assert updated_dataset is not None
@@ -81,9 +81,18 @@ def test_openvino_sync(self, mocker):
mocked_model.return_value = mocker.MagicMock(spec=MaskRCNNModel, model_adapter=adapter_mock)
params = DetectionConfig(header=self.hyper_parameters.header)
ov_mask_inferencer = OpenVINOMaskInferencer(params, self.label_schema, "")
ov_mask_inferencer.model = mocked_model
original_shape = (self.dataset[0].media.width, self.dataset[0].media.height, 3)
ov_mask_inferencer.model.resize_mask = False
ov_mask_inferencer.model.preprocess.return_value = ({"foo": "bar"}, {"baz": "qux"})
ov_mask_inferencer.model.preprocess.return_value = (
{"foo": "bar"},
{"baz": "qux", "original_shape": original_shape},
)
ov_mask_inferencer.model.postprocess.return_value = (
np.array([], dtype=np.float32),
np.array([], dtype=np.uint32),
np.zeros((0, 4), dtype=np.float32),
[],
)
ov_inferencer = OpenVINOTileClassifierWrapper(
ov_mask_inferencer, tile_classifier_model_file="", tile_classifier_weight_file="", mode="sync"
)
@@ -99,6 +108,10 @@
[], [np.zeros((0, 4), dtype=np.float32)], np.zeros((0, 4), dtype=np.float32)
),
)
ov_inferencer.tiler.model.infer_sync.return_value = {
"feature_vector": np.zeros((1, 5), dtype=np.float32),
"saliency_map": np.zeros((1, 1, 2, 2), dtype=np.float32),
}
mocker.patch.object(OpenVINODetectionTask, "load_inferencer", return_value=ov_inferencer)
ov_task = OpenVINODetectionTask(self.task_env)
updated_dataset = ov_task.infer(self.dataset)