From b5b9a91327497283c25d347b4019f9f2bd007cdb Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Tue, 20 Feb 2024 13:45:17 +0100 Subject: [PATCH 01/22] introduce a postprocessing package also group explainability actions together Signed-off-by: Igor Davidyuk --- geti_sdk/deployment/deployment.py | 12 ++---- .../predictions_postprocessing/__init__.py | 21 ++++++++++ .../postprocessing.py | 42 +++++++++++++++++++ 3 files changed, 67 insertions(+), 8 deletions(-) create mode 100644 geti_sdk/deployment/predictions_postprocessing/__init__.py create mode 100644 geti_sdk/deployment/predictions_postprocessing/postprocessing.py diff --git a/geti_sdk/deployment/deployment.py b/geti_sdk/deployment/deployment.py index c5665465..a83e2a30 100644 --- a/geti_sdk/deployment/deployment.py +++ b/geti_sdk/deployment/deployment.py @@ -21,7 +21,6 @@ import attr import numpy as np import otx -from otx.api.utils.detection_utils import detection2array from geti_sdk.data_models import ( Annotation, @@ -38,6 +37,7 @@ from geti_sdk.deployment.legacy_converters import ( AnomalyClassificationToAnnotationConverter, ) +from geti_sdk.deployment.predictions_postprocessing import detection2array from geti_sdk.rest_converters import ProjectRESTConverter from .deployed_model import DeployedModel @@ -285,13 +285,6 @@ def _infer_task( inference_results = model.infer(preprocessed_image) postprocessing_results = model.postprocess(inference_results, metadata=metadata) - # Optional output related to explainability - saliency_map: Optional[np.ndarray] = None - repr_vector: Optional[np.ndarray] = None - if explain: - saliency_map, repr_vector = model.postprocess_explain_outputs( - inference_results=inference_results, metadata=metadata - ) converter = self._inference_converters[task.title] width: int = image.shape[1] @@ -375,6 +368,9 @@ def _infer_task( # Add optional explainability outputs if explain: + saliency_map, repr_vector = model.postprocess_explain_outputs( + inference_results=inference_results, metadata=metadata + ) prediction.feature_vector = repr_vector result_medium = ResultMedium(name="saliency map", type="saliency map") result_medium.data = saliency_map diff --git a/geti_sdk/deployment/predictions_postprocessing/__init__.py b/geti_sdk/deployment/predictions_postprocessing/__init__.py new file mode 100644 index 00000000..09552458 --- /dev/null +++ b/geti_sdk/deployment/predictions_postprocessing/__init__.py @@ -0,0 +1,21 @@ +# Copyright (C) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +""" +Predictions postprocessing module. 
+""" + +from .postprocessing import detection2array + +__all__ = ["detection2array"] diff --git a/geti_sdk/deployment/predictions_postprocessing/postprocessing.py b/geti_sdk/deployment/predictions_postprocessing/postprocessing.py new file mode 100644 index 00000000..b74986af --- /dev/null +++ b/geti_sdk/deployment/predictions_postprocessing/postprocessing.py @@ -0,0 +1,42 @@ +# Copyright (C) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from typing import List + +import numpy as np + + +def detection2array(detections: List) -> np.ndarray: + """ + Convert list of OpenVINO Detection to a numpy array. + + :param detections: List of OpenVINO Detection containing score, id, xmin, ymin, xmax, ymax + + :return: np.ndarray: numpy array with [label, confidence, x1, y1, x2, y2] + """ + scores = np.empty((0, 1), dtype=np.float32) + labels = np.empty((0, 1), dtype=np.uint32) + boxes = np.empty((0, 4), dtype=np.float32) + for det in detections: + if (det.xmax - det.xmin) * (det.ymax - det.ymin) < 1.0: + continue + scores = np.append(scores, [[det.score]], axis=0) + labels = np.append(labels, [[det.id]], axis=0) + boxes = np.append( + boxes, + [[float(det.xmin), float(det.ymin), float(det.xmax), float(det.ymax)]], + axis=0, + ) + detections = np.concatenate((labels, scores, boxes), -1) + return detections From 5f5602bf55d106d2e837881085781ee41ae84dae Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Wed, 21 Feb 2024 13:55:20 +0100 Subject: [PATCH 02/22] move postprocessing to the deployed_model module Signed-off-by: Igor Davidyuk --- geti_sdk/deployment/deployed_model.py | 142 +++++++++++++++++++++++- geti_sdk/deployment/deployment.py | 149 +------------------------- 2 files changed, 139 insertions(+), 152 deletions(-) diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 42926bc3..ece681e4 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -33,6 +33,17 @@ from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity from geti_sdk.data_models import OptimizedModel, Project, TaskConfiguration +from geti_sdk.data_models.annotations import Annotation +from geti_sdk.data_models.label import ScoredLabel +from geti_sdk.data_models.predictions import Prediction, ResultMedium +from geti_sdk.data_models.shapes import Polygon, Rectangle, RotatedRectangle +from geti_sdk.data_models.task import Task, TaskType +from geti_sdk.deployment.legacy_converters import ( + AnomalyClassificationToAnnotationConverter, +) +from geti_sdk.deployment.predictions_postprocessing.postprocessing import ( + detection2array, +) from geti_sdk.http_session import GetiSession from geti_sdk.rest_converters import ConfigurationRESTConverter, ModelRESTConverter @@ -440,7 +451,7 @@ def save(self, path_to_folder: Union[str, os.PathLike]) -> bool: json.dump(model_detail_dict, model_detail_file, indent=4) return True - def preprocess( + def _preprocess( self, image: np.ndarray ) -> Tuple[Dict[str, 
np.ndarray], Dict[str, Tuple[int, int, int]]]: """ @@ -453,7 +464,7 @@ def preprocess( """ return self._inference_model.preprocess(image) - def postprocess( + def _postprocess( self, inference_results: Dict[str, np.ndarray], metadata: Optional[Dict[str, Any]] = None, @@ -475,7 +486,7 @@ def postprocess( """ return self._inference_model.postprocess(inference_results, metadata) - def postprocess_explain_outputs( + def _postprocess_explain_outputs( self, inference_results: Dict[str, np.ndarray], metadata: Optional[Dict[str, Any]] = None, @@ -568,7 +579,7 @@ def postprocess_explain_outputs( repr_vector = None return saliency_map, repr_vector - def infer(self, preprocessed_image: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + def infer(self, image: np.ndarray, task: Task, explain: bool = False) -> Prediction: """ Run inference on an already preprocessed image. @@ -576,7 +587,128 @@ def infer(self, preprocessed_image: Dict[str, np.ndarray]) -> Dict[str, np.ndarr image :return: Dictionary containing the model outputs """ - return self._inference_model.infer_sync(preprocessed_image) + preprocessed_image, metadata = self._preprocess(image) + inference_results: Dict[str, np.ndarray] = self._inference_model.infer_sync( + preprocessed_image + ) + postprocessing_results = self._postprocess(inference_results, metadata=metadata) + + # Create a converter + try: + import otx + from otx.api.usecases.exportable_code.prediction_to_annotation_converter import ( + IPredictionToAnnotationConverter, + create_converter, + ) + except ImportError as error: + raise ValueError( + f"Unable to load inference model for {self}. Relevant OpenVINO " + f"packages were not found. Please make sure that all packages from the " + f"file `requirements-deployment.txt` have been installed. " + ) from error + if otx.__version__ > "1.2.0": + configuration = self.openvino_model_parameters + if "use_ellipse_shapes" not in configuration.keys(): + configuration.update({"use_ellipse_shapes": False}) + converter_args = { + "labels": self.ote_label_schema, + "configuration": configuration, + } + else: + converter_args = {"labels": self.ote_label_schema} + + converter: IPredictionToAnnotationConverter = create_converter( + converter_type=task.type.to_ote_domain(), **converter_args + ) + + # Proceed with postprocessing + width: int = image.shape[1] + height: int = image.shape[0] + + # Handle empty annotations + if isinstance(postprocessing_results, (np.ndarray, list)): + try: + n_outputs = len(postprocessing_results) + except TypeError: + n_outputs = 1 + else: + # Handle the new modelAPI output formats for detection and instance + # segmentation models + if ( + hasattr(postprocessing_results, "objects") + and task.type == TaskType.DETECTION + ): + n_outputs = len(postprocessing_results.objects) + postprocessing_results = detection2array(postprocessing_results.objects) + elif hasattr(postprocessing_results, "segmentedObjects") and task.type in [ + TaskType.INSTANCE_SEGMENTATION, + TaskType.ROTATED_DETECTION, + ]: + n_outputs = len(postprocessing_results.segmentedObjects) + postprocessing_results = postprocessing_results.segmentedObjects + elif isinstance(postprocessing_results, tuple): + try: + n_outputs = len(postprocessing_results) + except TypeError: + n_outputs = 1 + else: + raise ValueError( + f"Unknown postprocessing output of type " + f"`{type(postprocessing_results)}` for task `{task.title}`." 
+ ) + + if n_outputs != 0: + try: + annotation_scene_entity = converter.convert_to_annotation( + predictions=postprocessing_results, metadata=metadata + ) + except AttributeError: + # Add backwards compatibility for anomaly models created in Geti v1.8 and below + if task.type.is_anomaly: + legacy_converter = AnomalyClassificationToAnnotationConverter( + label_schema=self.ote_label_schema + ) + annotation_scene_entity = legacy_converter.convert_to_annotation( + predictions=postprocessing_results, metadata=metadata + ) + converter = legacy_converter + + prediction = Prediction.from_ote( + annotation_scene_entity, image_width=width, image_height=height + ) + else: + prediction = Prediction(annotations=[]) + + # Empty label is not generated by OTE correctly, append it here if there are + # no other predictions + if len(prediction.annotations) == 0: + empty_label = next((label for label in task.labels if label.is_empty), None) + if empty_label is not None: + prediction.append( + Annotation( + shape=Rectangle(x=0, y=0, width=width, height=height), + labels=[ScoredLabel.from_label(empty_label, probability=1)], + ) + ) + + # Rotated detection models produce Polygons, convert them here to + # RotatedRectangles + if task.type == TaskType.ROTATED_DETECTION: + for annotation in prediction.annotations: + if isinstance(annotation.shape, Polygon): + annotation.shape = RotatedRectangle.from_polygon(annotation.shape) + + # Add optional explainability outputs + if explain: + saliency_map, repr_vector = self._postprocess_explain_outputs( + inference_results=inference_results, metadata=metadata + ) + prediction.feature_vector = repr_vector + result_medium = ResultMedium(name="saliency map", type="saliency map") + result_medium.data = saliency_map + prediction.maps = [result_medium] + + return prediction @property def ote_label_schema(self) -> LabelSchemaEntity: diff --git a/geti_sdk/deployment/deployment.py b/geti_sdk/deployment/deployment.py index a83e2a30..4c584210 100644 --- a/geti_sdk/deployment/deployment.py +++ b/geti_sdk/deployment/deployment.py @@ -22,22 +22,8 @@ import numpy as np import otx -from geti_sdk.data_models import ( - Annotation, - Label, - Prediction, - Project, - ScoredLabel, - Task, - TaskType, -) -from geti_sdk.data_models.predictions import ResultMedium -from geti_sdk.data_models.shapes import Polygon, Rectangle, RotatedRectangle +from geti_sdk.data_models import Label, Prediction, Project, Task, TaskType from geti_sdk.deployment.data_models import ROI, IntermediateInferenceResult -from geti_sdk.deployment.legacy_converters import ( - AnomalyClassificationToAnnotationConverter, -) -from geti_sdk.deployment.predictions_postprocessing import detection2array from geti_sdk.rest_converters import ProjectRESTConverter from .deployed_model import DeployedModel @@ -61,7 +47,6 @@ def __attrs_post_init__(self): self._is_single_task: bool = len(self.project.get_trainable_tasks()) == 1 self._are_models_loaded: bool = False self._inference_converters: Dict[str, Any] = {} - self._empty_labels: Dict[str, Label] = {} self._path_to_temp_resources: Optional[str] = None self._requires_resource_cleanup: bool = False @@ -161,20 +146,6 @@ def load_inference_models(self, device: str = "CPU"): :param device: Device to load the inference models to (e.g. 
'CPU', 'GPU', 'AUTO', etc) """ - try: - from otx.api.usecases.exportable_code.prediction_to_annotation_converter import ( - IPredictionToAnnotationConverter, - create_converter, - ) - except ImportError as error: - raise ValueError( - f"Unable to load inference model for {self}. Relevant OpenVINO " - f"packages were not found. Please make sure that all packages from the " - f"file `requirements-deployment.txt` have been installed. " - ) from error - - inference_converters: Dict[str, IPredictionToAnnotationConverter] = {} - empty_labels: Dict[str, Label] = {} for model, task in zip(self.models, self.project.get_trainable_tasks()): model.load_inference_model(device=device, project=self.project) @@ -187,27 +158,6 @@ def load_inference_models(self, device: str = "CPU"): if label.name == "Anomalous": label.is_anomalous = True - if otx.__version__ > "1.2.0": - configuration = model.openvino_model_parameters - if "use_ellipse_shapes" not in configuration.keys(): - configuration.update({"use_ellipse_shapes": False}) - converter_args = { - "labels": model.ote_label_schema, - "configuration": configuration, - } - else: - converter_args = {"labels": model.ote_label_schema} - - inference_converter = create_converter( - converter_type=task.type.to_ote_domain(), **converter_args - ) - inference_converters.update({task.title: inference_converter}) - - empty_label = next((label for label in task.labels if label.is_empty), None) - empty_labels.update({task.title: empty_label}) - - self._inference_converters = inference_converters - self._empty_labels = empty_labels self._are_models_loaded = True logging.info(f"Inference models loaded on device `{device}` successfully.") @@ -281,102 +231,7 @@ def _infer_task( :return: Inference result """ model = self._get_model_for_task(task) - preprocessed_image, metadata = model.preprocess(image) - inference_results = model.infer(preprocessed_image) - postprocessing_results = model.postprocess(inference_results, metadata=metadata) - - converter = self._inference_converters[task.title] - - width: int = image.shape[1] - height: int = image.shape[0] - - # Handle empty annotations - if isinstance(postprocessing_results, (np.ndarray, list)): - try: - n_outputs = len(postprocessing_results) - except TypeError: - n_outputs = 1 - else: - # Handle the new modelAPI output formats for detection and instance - # segmentation models - if ( - hasattr(postprocessing_results, "objects") - and task.type == TaskType.DETECTION - ): - n_outputs = len(postprocessing_results.objects) - postprocessing_results = detection2array(postprocessing_results.objects) - elif hasattr(postprocessing_results, "segmentedObjects") and task.type in [ - TaskType.INSTANCE_SEGMENTATION, - TaskType.ROTATED_DETECTION, - ]: - n_outputs = len(postprocessing_results.segmentedObjects) - postprocessing_results = postprocessing_results.segmentedObjects - elif isinstance(postprocessing_results, tuple): - try: - n_outputs = len(postprocessing_results) - except TypeError: - n_outputs = 1 - else: - raise ValueError( - f"Unknown postprocessing output of type " - f"`{type(postprocessing_results)}` for task `{task.title}`." 
- ) - - if n_outputs != 0: - try: - annotation_scene_entity = converter.convert_to_annotation( - predictions=postprocessing_results, metadata=metadata - ) - except AttributeError: - # Add backwards compatibility for anomaly models created in Geti v1.8 and below - if task.type.is_anomaly: - legacy_converter = AnomalyClassificationToAnnotationConverter( - label_schema=model.ote_label_schema - ) - annotation_scene_entity = legacy_converter.convert_to_annotation( - predictions=postprocessing_results, metadata=metadata - ) - self._inference_converters[task.type] = legacy_converter - - prediction = Prediction.from_ote( - annotation_scene_entity, image_width=width, image_height=height - ) - else: - prediction = Prediction(annotations=[]) - - # Empty label is not generated by OTE correctly, append it here if there are - # no other predictions - if len(prediction.annotations) == 0: - if self._empty_labels[task.title] is not None: - prediction.append( - Annotation( - shape=Rectangle(x=0, y=0, width=width, height=height), - labels=[ - ScoredLabel.from_label( - self._empty_labels[task.title], probability=1 - ) - ], - ) - ) - - # Rotated detection models produce Polygons, convert them here to - # RotatedRectangles - if task.type == TaskType.ROTATED_DETECTION: - for annotation in prediction.annotations: - if isinstance(annotation.shape, Polygon): - annotation.shape = RotatedRectangle.from_polygon(annotation.shape) - - # Add optional explainability outputs - if explain: - saliency_map, repr_vector = model.postprocess_explain_outputs( - inference_results=inference_results, metadata=metadata - ) - prediction.feature_vector = repr_vector - result_medium = ResultMedium(name="saliency map", type="saliency map") - result_medium.data = saliency_map - prediction.maps = [result_medium] - - return prediction + return model.infer(image, task, explain) def _infer_pipeline(self, image: np.ndarray, explain: bool = False) -> Prediction: """ From c8a55e47af44f6971156d97fbd8130bf9cb3286d Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Thu, 22 Feb 2024 16:05:55 +0100 Subject: [PATCH 03/22] push prediction to annotation converter to postprocessing module Signed-off-by: Igor Davidyuk --- geti_sdk/deployment/deployed_model.py | 131 +++------------- .../postprocessing.py | 145 +++++++++++++++++- 2 files changed, 162 insertions(+), 114 deletions(-) diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index ece681e4..8cb92505 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -33,17 +33,16 @@ from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity from geti_sdk.data_models import OptimizedModel, Project, TaskConfiguration -from geti_sdk.data_models.annotations import Annotation -from geti_sdk.data_models.label import ScoredLabel + +# from geti_sdk.data_models.annotations import Annotation +# from geti_sdk.data_models.label import ScoredLabel from geti_sdk.data_models.predictions import Prediction, ResultMedium -from geti_sdk.data_models.shapes import Polygon, Rectangle, RotatedRectangle -from geti_sdk.data_models.task import Task, TaskType -from geti_sdk.deployment.legacy_converters import ( - AnomalyClassificationToAnnotationConverter, -) -from geti_sdk.deployment.predictions_postprocessing.postprocessing import ( - detection2array, -) +from geti_sdk.data_models.task import Task + +# from geti_sdk.deployment.legacy_converters import ( +# AnomalyClassificationToAnnotationConverter, +# ) +from 
geti_sdk.deployment.predictions_postprocessing.postprocessing import Postprocessor from geti_sdk.http_session import GetiSession from geti_sdk.rest_converters import ConfigurationRESTConverter, ModelRESTConverter @@ -101,6 +100,8 @@ def __attrs_post_init__(self): self.openvino_model_parameters: Optional[Dict[str, Any]] = None + self._postprocessor: Optional[Postprocessor] = None + @property def model_data_path(self) -> str: """ @@ -593,110 +594,14 @@ def infer(self, image: np.ndarray, task: Task, explain: bool = False) -> Predict ) postprocessing_results = self._postprocess(inference_results, metadata=metadata) - # Create a converter - try: - import otx - from otx.api.usecases.exportable_code.prediction_to_annotation_converter import ( - IPredictionToAnnotationConverter, - create_converter, - ) - except ImportError as error: - raise ValueError( - f"Unable to load inference model for {self}. Relevant OpenVINO " - f"packages were not found. Please make sure that all packages from the " - f"file `requirements-deployment.txt` have been installed. " - ) from error - if otx.__version__ > "1.2.0": - configuration = self.openvino_model_parameters - if "use_ellipse_shapes" not in configuration.keys(): - configuration.update({"use_ellipse_shapes": False}) - converter_args = { - "labels": self.ote_label_schema, - "configuration": configuration, - } - else: - converter_args = {"labels": self.ote_label_schema} - - converter: IPredictionToAnnotationConverter = create_converter( - converter_type=task.type.to_ote_domain(), **converter_args - ) - - # Proceed with postprocessing - width: int = image.shape[1] - height: int = image.shape[0] - - # Handle empty annotations - if isinstance(postprocessing_results, (np.ndarray, list)): - try: - n_outputs = len(postprocessing_results) - except TypeError: - n_outputs = 1 - else: - # Handle the new modelAPI output formats for detection and instance - # segmentation models - if ( - hasattr(postprocessing_results, "objects") - and task.type == TaskType.DETECTION - ): - n_outputs = len(postprocessing_results.objects) - postprocessing_results = detection2array(postprocessing_results.objects) - elif hasattr(postprocessing_results, "segmentedObjects") and task.type in [ - TaskType.INSTANCE_SEGMENTATION, - TaskType.ROTATED_DETECTION, - ]: - n_outputs = len(postprocessing_results.segmentedObjects) - postprocessing_results = postprocessing_results.segmentedObjects - elif isinstance(postprocessing_results, tuple): - try: - n_outputs = len(postprocessing_results) - except TypeError: - n_outputs = 1 - else: - raise ValueError( - f"Unknown postprocessing output of type " - f"`{type(postprocessing_results)}` for task `{task.title}`." 
- ) - - if n_outputs != 0: - try: - annotation_scene_entity = converter.convert_to_annotation( - predictions=postprocessing_results, metadata=metadata - ) - except AttributeError: - # Add backwards compatibility for anomaly models created in Geti v1.8 and below - if task.type.is_anomaly: - legacy_converter = AnomalyClassificationToAnnotationConverter( - label_schema=self.ote_label_schema - ) - annotation_scene_entity = legacy_converter.convert_to_annotation( - predictions=postprocessing_results, metadata=metadata - ) - converter = legacy_converter - - prediction = Prediction.from_ote( - annotation_scene_entity, image_width=width, image_height=height + # Create a postprocessor + if self._postprocessor is None: + self._postprocessor = Postprocessor( + labels=self.ote_label_schema, + configuration=self.openvino_model_parameters, + task=task, ) - else: - prediction = Prediction(annotations=[]) - - # Empty label is not generated by OTE correctly, append it here if there are - # no other predictions - if len(prediction.annotations) == 0: - empty_label = next((label for label in task.labels if label.is_empty), None) - if empty_label is not None: - prediction.append( - Annotation( - shape=Rectangle(x=0, y=0, width=width, height=height), - labels=[ScoredLabel.from_label(empty_label, probability=1)], - ) - ) - - # Rotated detection models produce Polygons, convert them here to - # RotatedRectangles - if task.type == TaskType.ROTATED_DETECTION: - for annotation in prediction.annotations: - if isinstance(annotation.shape, Polygon): - annotation.shape = RotatedRectangle.from_polygon(annotation.shape) + prediction = self._postprocessor(postprocessing_results, image, metadata) # Add optional explainability outputs if explain: diff --git a/geti_sdk/deployment/predictions_postprocessing/postprocessing.py b/geti_sdk/deployment/predictions_postprocessing/postprocessing.py index b74986af..b6c1630d 100644 --- a/geti_sdk/deployment/predictions_postprocessing/postprocessing.py +++ b/geti_sdk/deployment/predictions_postprocessing/postprocessing.py @@ -12,9 +12,26 @@ # See the License for the specific language governing permissions # and limitations under the License. -from typing import List +"""Module implements the Postprocessor class.""" + +from typing import Dict, List, Tuple import numpy as np +import otx +from otx.api.usecases.exportable_code.prediction_to_annotation_converter import ( + IPredictionToAnnotationConverter, + create_converter, +) + +from geti_sdk.data_models.annotations import Annotation +from geti_sdk.data_models.enums.task_type import TaskType +from geti_sdk.data_models.label import ScoredLabel +from geti_sdk.data_models.predictions import Prediction +from geti_sdk.data_models.shapes import Polygon, Rectangle, RotatedRectangle +from geti_sdk.data_models.task import Task +from geti_sdk.deployment.legacy_converters.legacy_anomaly_converter import ( + AnomalyClassificationToAnnotationConverter, +) def detection2array(detections: List) -> np.ndarray: @@ -40,3 +57,129 @@ def detection2array(detections: List) -> np.ndarray: ) detections = np.concatenate((labels, scores, boxes), -1) return detections + + +class Postprocessor: + """ + Postprocessor class responsible for converting the output of the model to a Prediction object. + + :param labels: Label schema to be used for the conversion. + :param configuration: Configuration to be used for the conversion. + :param task: Task object containing the task metadata. 
+ """ + + def __init__(self, labels, configuration, task: Task) -> None: + self.task = task + self.ote_label_schema = labels + + # Create OTX converter + converter_args = {"labels": labels} + if otx.__version__ > "1.2.0": + if "use_ellipse_shapes" not in configuration.keys(): + configuration.update({"use_ellipse_shapes": False}) + converter_args["configuration"] = configuration + + self.converter: IPredictionToAnnotationConverter = create_converter( + converter_type=self.task.type.to_ote_domain(), **converter_args + ) + + def __call__( + self, + postprocessing_results: List, + image: np.ndarray, + metadata: Dict[str, Tuple[int, int, int]], + ) -> Prediction: + """ + Convert the postprocessing results to a Prediction object. + """ + # Handle empty annotations + if isinstance(postprocessing_results, (np.ndarray, list)): + try: + n_outputs = len(postprocessing_results) + except TypeError: + n_outputs = 1 + else: + # Handle the new modelAPI output formats for detection and instance + # segmentation models + if ( + hasattr(postprocessing_results, "objects") + and self.task.type == TaskType.DETECTION + ): + n_outputs = len(postprocessing_results.objects) + postprocessing_results = detection2array(postprocessing_results.objects) + elif hasattr( + postprocessing_results, "segmentedObjects" + ) and self.task.type in [ + TaskType.INSTANCE_SEGMENTATION, + TaskType.ROTATED_DETECTION, + ]: + n_outputs = len(postprocessing_results.segmentedObjects) + postprocessing_results = postprocessing_results.segmentedObjects + elif isinstance(postprocessing_results, tuple): + try: + n_outputs = len(postprocessing_results) + except TypeError: + n_outputs = 1 + else: + raise ValueError( + f"Unknown postprocessing output of type " + f"`{type(postprocessing_results)}` for task `{self.task.title}`." 
+ ) + + # Proceed with postprocessing + width: int = image.shape[1] + height: int = image.shape[0] + + if n_outputs != 0: + try: + annotation_scene_entity = self.converter.convert_to_annotation( + predictions=postprocessing_results, metadata=metadata + ) + except AttributeError: + # Add backwards compatibility for anomaly models created in Geti v1.8 and below + if self.task.type.is_anomaly: + legacy_converter = AnomalyClassificationToAnnotationConverter( + label_schema=self.ote_label_schema + ) + annotation_scene_entity = legacy_converter.convert_to_annotation( + predictions=postprocessing_results, metadata=metadata + ) + self.converter = legacy_converter + + prediction = Prediction.from_ote( + annotation_scene_entity, image_width=width, image_height=height + ) + else: + prediction = Prediction(annotations=[]) + + print( + "pre-converter", + postprocessing_results, + "metadata", + metadata, + ) + print("width", width, "height", height) + print("post-converter", prediction) + + # Empty label is not generated by OTE correctly, append it here if there are + # no other predictions + if len(prediction.annotations) == 0: + empty_label = next( + (label for label in self.task.labels if label.is_empty), None + ) + if empty_label is not None: + prediction.append( + Annotation( + shape=Rectangle(x=0, y=0, width=width, height=height), + labels=[ScoredLabel.from_label(empty_label, probability=1)], + ) + ) + + # Rotated detection models produce Polygons, convert them here to + # RotatedRectangles + if self.task.type == TaskType.ROTATED_DETECTION: + for annotation in prediction.annotations: + if isinstance(annotation.shape, Polygon): + annotation.shape = RotatedRectangle.from_polygon(annotation.shape) + + return prediction From db859a0f310d4538638154ea19dad5fbf3a164b2 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Wed, 6 Mar 2024 12:31:39 +0100 Subject: [PATCH 04/22] converters without otx Signed-off-by: Igor Davidyuk --- geti_sdk/data_models/label.py | 20 + geti_sdk/data_models/label_group.py | 51 ++ geti_sdk/data_models/label_schema.py | 65 +++ geti_sdk/data_models/shapes.py | 11 + geti_sdk/deployment/deployed_model.py | 134 ++--- geti_sdk/deployment/deployment.py | 2 +- .../postprocessing.py | 22 +- .../services/__init__.py | 17 + .../prediction_to_annotation_converter.py | 498 ++++++++++++++++++ .../utils/__init__.py | 17 + .../utils/detection_utils.py | 87 +++ .../utils/segmentation_utils.py | 247 +++++++++ geti_sdk/deployment/utils.py | 16 + 13 files changed, 1112 insertions(+), 75 deletions(-) create mode 100644 geti_sdk/data_models/label_group.py create mode 100644 geti_sdk/data_models/label_schema.py create mode 100644 geti_sdk/deployment/predictions_postprocessing/services/__init__.py create mode 100644 geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py create mode 100644 geti_sdk/deployment/predictions_postprocessing/utils/__init__.py create mode 100644 geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py create mode 100644 geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py diff --git a/geti_sdk/data_models/label.py b/geti_sdk/data_models/label.py index cd9af192..15f08993 100644 --- a/geti_sdk/data_models/label.py +++ b/geti_sdk/data_models/label.py @@ -23,6 +23,7 @@ from otx.api.entities.scored_label import ScoredLabel as OteScoredLabel from geti_sdk.data_models.enums import TaskType +from geti_sdk.data_models.enums.domain import Domain @attr.define @@ -64,10 +65,29 @@ class Label: group: str is_empty: 
bool hotkey: str = "" + domain: Optional[Domain] = None id: Optional[str] = None parent_id: Optional[str] = None is_anomalous: Optional[bool] = None + def __key(self) -> Tuple[str, str]: + """ + Return a tuple representing the key of the label. + + The key is a tuple containing the name and color of the label. + + :return: A tuple representing the key of the label. + """ + return (self.name, self.color) + + def __hash__(self) -> int: + """ + Calculate the hash value of the object. + + :return: The hash value of the object. + """ + return hash(self.__key()) + def to_ote(self, task_type: TaskType) -> LabelEntity: """ Convert the `Label` instance to an OTE SDK LabelEntity object. diff --git a/geti_sdk/data_models/label_group.py b/geti_sdk/data_models/label_group.py new file mode 100644 index 00000000..fc996aed --- /dev/null +++ b/geti_sdk/data_models/label_group.py @@ -0,0 +1,51 @@ +# Copyright (C) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from enum import Enum +from typing import List, Optional + +from geti_sdk.data_models.label import Label + + +class LabelGroupType(Enum): + """Enum to indicate the LabelGroupType.""" + + EXCLUSIVE = 1 + EMPTY_LABEL = 2 + + +class LabelGroup: + """ + Representation of a group of labels. + """ + + def __init__( + self, + name: str, + labels: List[Label], + group_type: LabelGroupType = LabelGroupType.EXCLUSIVE, + id: Optional[str] = None, + ) -> None: + """ + Initialize a LabelGroup object. + + :param name: The name of the label group. + :param labels: A list of Label objects associated with the group. + :param group_type: The type of the label group. Defaults to LabelGroupType.EXCLUSIVE. + :param id: The ID of the label group. Defaults to None. + """ + self.id = id + self.name = name + self.group_type = group_type + self.labels = sorted(labels, key=lambda label: label.id) diff --git a/geti_sdk/data_models/label_schema.py b/geti_sdk/data_models/label_schema.py new file mode 100644 index 00000000..f7bba3eb --- /dev/null +++ b/geti_sdk/data_models/label_schema.py @@ -0,0 +1,65 @@ +# Copyright (C) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from typing import List, Optional + +from geti_sdk.data_models.label import Label +from geti_sdk.data_models.label_group import LabelGroup, LabelGroupType + + +class LabelSchema: + """ + The `LabelSchema` class defines the structure and properties of labels and label groups. 
+ + :param label_groups: Optional list of `LabelGroup` objects representing the label groups in the schema + """ + + def __init__(self, label_groups: Optional[List[LabelGroup]] = None) -> None: + """ + Initialize a new instance of the `LabelSchema` class. + + :param label_groups: Optional list of `LabelGroup` objects representing the label groups in the schema + """ + self._groups = label_groups + + def get_labels(self, include_empty: bool = False) -> List[Label]: + """ + Get the labels in the label schema. + + :param include_empty: Flag determining whether to include empty labels + :return: List of all labels in the label schema + """ + labels = { + label + for group in self._groups + for label in group.labels + if include_empty or not label.is_empty + } + return sorted(list(labels), key=lambda label: label.id) + + def get_groups(self, include_empty: bool = False) -> List[LabelGroup]: + """ + Get the label groups in the label schema. + + :param include_empty: Flag determining whether to include empty label groups + :return: List of all label groups in the label schema + """ + if include_empty: + return self._groups + + return [ + group + for group in self._groups + if group.group_type != LabelGroupType.EMPTY_LABEL + ] diff --git a/geti_sdk/data_models/shapes.py b/geti_sdk/data_models/shapes.py index 081d427a..ddf7bd40 100644 --- a/geti_sdk/data_models/shapes.py +++ b/geti_sdk/data_models/shapes.py @@ -278,6 +278,17 @@ def y_max(self) -> int: """ return self.y + self.height + @classmethod + def generate_full_box(cls, image_width: int, image_height: int) -> "Rectangle": + """ + Return a rectangle that fully encapsulates the image. + + :param image_width: Width of the image to which the rectangle applies (in pixels) + :param image_height: Height of the image to which the rectangle applies (in pixels) + :return: Rectangle: A rectangle that fully encapsulates the image. + """ + return cls(x=0, y=0, width=image_width, height=image_height) + @attr.define(slots=False) class Ellipse(Shape): diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 8cb92505..902f7919 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions # and limitations under the License. 
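# ---------------------------------------------------------------------------
# [Editor's note — illustrative sketch only, not part of the patch above.]
# The new Label / LabelGroup / LabelSchema classes and the
# Rectangle.generate_full_box() helper introduced in this commit replace the
# OTX label-schema entities. A minimal usage sketch, assuming hypothetical
# label names, colors and ids:
from geti_sdk.data_models.label import Label
from geti_sdk.data_models.label_group import LabelGroup
from geti_sdk.data_models.label_schema import LabelSchema
from geti_sdk.data_models.shapes import Rectangle

dog = Label(name="dog", color="#ff0000", group="default", is_empty=False, id="1")
cat = Label(name="cat", color="#00ff00", group="default", is_empty=False, id="2")
schema = LabelSchema(label_groups=[LabelGroup(name="default", labels=[dog, cat])])

print(schema.get_labels())  # -> [dog, cat], sorted by label id
print(schema.get_groups())  # -> all non-empty label groups

# A full-image rectangle, e.g. for classification results on a 640x480 image:
full_box = Rectangle.generate_full_box(image_width=640, image_height=480)
# [End of editor's note.]
# ---------------------------------------------------------------------------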
-import datetime import importlib.util import json import logging @@ -26,32 +25,43 @@ import attr import numpy as np -from otx.algorithms.classification.utils import get_cls_inferencer_configuration -from otx.api.entities.color import Color -from otx.api.entities.label import Domain as OTEDomain -from otx.api.entities.label import LabelEntity -from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity +from openvino.model_api.adapters import OpenvinoAdapter, OVMSAdapter +from openvino.model_api.models import Model as model_api_Model +from openvino.runtime import Core from geti_sdk.data_models import OptimizedModel, Project, TaskConfiguration +from geti_sdk.data_models.enums.domain import Domain # from geti_sdk.data_models.annotations import Annotation -# from geti_sdk.data_models.label import ScoredLabel +from geti_sdk.data_models.label import Label +from geti_sdk.data_models.label_group import LabelGroup +from geti_sdk.data_models.label_schema import LabelSchema from geti_sdk.data_models.predictions import Prediction, ResultMedium -from geti_sdk.data_models.task import Task # from geti_sdk.deployment.legacy_converters import ( # AnomalyClassificationToAnnotationConverter, # ) from geti_sdk.deployment.predictions_postprocessing.postprocessing import Postprocessor +from geti_sdk.deployment.predictions_postprocessing.services.prediction_to_annotation_converter import ( + ConverterFactory, +) from geti_sdk.http_session import GetiSession from geti_sdk.rest_converters import ConfigurationRESTConverter, ModelRESTConverter from .utils import ( generate_ovms_model_address, generate_ovms_model_name, + rgb_to_hex, target_device_is_ovms, ) +# from otx.algorithms.classification.utils import get_cls_inferencer_configuration +# from otx.api.entities.color import Color +# from otx.api.entities.label import Domain as OTEDomain +# from otx.api.entities.label import LabelEntity +# from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity + + MODEL_DIR_NAME = "model" PYTHON_DIR_NAME = "python" WRAPPER_DIR_NAME = "model_wrappers" @@ -90,7 +100,7 @@ def __attrs_post_init__(self): self._needs_tempdir_deletion: bool = False self._tempdir_path: Optional[str] = None self._has_custom_model_wrappers: bool = False - self._label_schema: Optional[LabelSchemaEntity] = None + # self._label_schema: Optional[LabelSchemaEntity] = None # Attributes related to model explainability self._saliency_key: Optional[str] = None @@ -232,24 +242,10 @@ def load_inference_model( :return: OpenVino inference engine model that can be used to make predictions on images """ - try: - from openvino.model_api.adapters import ( - OpenvinoAdapter, - OVMSAdapter, - create_core, - ) - from openvino.model_api.models import Model as OMZModel - except ImportError as error: - raise ValueError( - f"Unable to load inference model for {self}. Relevant OpenVINO " - f"packages were not found. Please make sure that OpenVINO is installed " - f"correctly." 
- ) from error - if not target_device_is_ovms(device=device): # Run the model locally model_adapter = OpenvinoAdapter( - create_core(), + Core(), model=os.path.join(self._model_data_path, "model.xml"), weights_path=os.path.join(self._model_data_path, "model.bin"), device=device, @@ -284,22 +280,26 @@ def load_inference_model( # Load model configuration config_path = os.path.join(self._model_data_path, "config.json") - if os.path.isfile(config_path): - with open(config_path, "r") as config_file: - configuration_json = json.load(config_file) - model_type = configuration_json.get("type_of_model") - parameters = configuration_json.get("model_parameters") - label_dictionary = parameters.pop(LABELS_CONFIG_KEY, None) - if configuration is not None: - configuration.update(parameters) - else: - configuration = parameters - else: + if not os.path.isfile(config_path): raise ValueError( f"Missing configuration file `config.json` for deployed model `{self}`," f" unable to load inference model." ) + with open(config_path, "r") as config_file: + configuration_json = json.load(config_file) + model_type = configuration_json.get("type_of_model") + + # Update model parameters + parameters = configuration_json.get("model_parameters") + if configuration is not None: + configuration.update(parameters) + else: + configuration = parameters + # Parse label schema + label_dictionary = configuration_json.get("model_parameters").pop( + LABELS_CONFIG_KEY, None + ) self._parse_label_schema_from_dict(label_dictionary) # Create model wrapper with the loaded configuration @@ -322,19 +322,19 @@ def load_inference_model( f"is required, but could not be found at path " f"{wrapper_module_path}." ) from ex - - if model_type == "otx_classification": - configuration = get_cls_inferencer_configuration(self.ote_label_schema) - - model = OMZModel.create_model( + model = model_api_Model.create_model( model=model_adapter, model_type=model_type, - configuration=configuration, preload=True, ) - self.openvino_model_parameters = configuration + # self.openvino_model_parameters = configuration self._inference_model = model + # Load results to Prediction converter + self._converter = ConverterFactory.create_converter( + self.label_schema, configuration + ) + # TODO: This is a workaround to fix the issue that causes the output blob name # to be unset. Remove this once it has been fixed on OTX/ModelAPI side output_names = list(self._inference_model.outputs.keys()) @@ -580,7 +580,7 @@ def _postprocess_explain_outputs( repr_vector = None return saliency_map, repr_vector - def infer(self, image: np.ndarray, task: Task, explain: bool = False) -> Prediction: + def infer(self, image: np.ndarray, explain: bool = False) -> Prediction: """ Run inference on an already preprocessed image. 
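# ---------------------------------------------------------------------------
# [Editor's note — illustrative sketch only, not part of the patch above.]
# After this refactor, Deployment._infer_task() delegates to
# DeployedModel.infer(), which runs preprocess -> infer_sync -> postprocess
# and converts the raw ModelAPI result into a geti_sdk Prediction via the
# converter created in load_inference_model(). A minimal end-to-end usage
# sketch (the deployment folder and image path are hypothetical):
import cv2

from geti_sdk.deployment import Deployment

deployment = Deployment.from_folder("deployment")   # hypothetical local folder
deployment.load_inference_models(device="CPU")

image_bgr = cv2.imread("sample_image.jpg")           # hypothetical image file
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
prediction = deployment.infer(image_rgb)             # returns a Prediction

for annotation in prediction.annotations:
    print(annotation.shape, [label.name for label in annotation.labels])
# [End of editor's note.]
# ---------------------------------------------------------------------------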
@@ -589,19 +589,23 @@ def infer(self, image: np.ndarray, task: Task, explain: bool = False) -> Predict :return: Dictionary containing the model outputs """ preprocessed_image, metadata = self._preprocess(image) + # metadata is a dict with keys 'original_shape' and 'resized_shape' inference_results: Dict[str, np.ndarray] = self._inference_model.infer_sync( preprocessed_image ) postprocessing_results = self._postprocess(inference_results, metadata=metadata) # Create a postprocessor - if self._postprocessor is None: - self._postprocessor = Postprocessor( - labels=self.ote_label_schema, - configuration=self.openvino_model_parameters, - task=task, - ) - prediction = self._postprocessor(postprocessing_results, image, metadata) + # if self._postprocessor is None: + # self._postprocessor = Postprocessor( + # label_schema=self.ote_label_schema, + # configuration=self.openvino_model_parameters, + # task=task, + # ) + # prediction = self._postprocessor(postprocessing_results, image, metadata) + prediction = self._converter.convert_to_prediction( + postprocessing_results, image_shape=metadata["original_shape"] + ) # Add optional explainability outputs if explain: @@ -616,14 +620,14 @@ def infer(self, image: np.ndarray, task: Task, explain: bool = False) -> Predict return prediction @property - def ote_label_schema(self) -> LabelSchemaEntity: + def label_schema(self) -> LabelSchema: """ - Return the OTE LabelSchema for the model. + Return the LabelSchema for the model. This requires the inference model to be loaded, getting this property while inference models are not loaded will raise a ValueError - :return: LabelSchemaEntity containing the OTE SDK label schema for the model + :return: LabelSchema containing the SDK label schema for the model """ if self._label_schema is None: raise ValueError( @@ -637,7 +641,7 @@ def _parse_label_schema_from_dict( ) -> None: """ Parse the dictionary contained in the model `config.json` file, and - generate an OTE LabelSchemaEntity from it. + generate an LabelSchema from it. 
:param label_schema_dict: Dictionary containing the label schema information to parse @@ -645,27 +649,31 @@ def _parse_label_schema_from_dict( label_groups_list = label_schema_dict[LABEL_GROUPS_KEY] labels_dict = label_schema_dict[ALL_LABELS_KEY] for key, value in labels_dict.items(): - label_entity = LabelEntity( - id=value["_id"], + color_tuple = tuple( + int(value["color"][key]) for key in ["red", "green", "blue"] + ) + label_entity = Label( name=value["name"], - hotkey=value["hotkey"], - domain=OTEDomain[value["domain"]], - color=Color(**value["color"]), + color=rgb_to_hex(color_tuple), + group=None, is_empty=value.get("is_empty", False), - creation_date=datetime.datetime.fromisoformat(value["creation_date"]), + hotkey=value["hotkey"], + domain=Domain[value["domain"]], + id=value["_id"], + is_anomalous=value.get("is_anomalous", False), ) labels_dict[key] = label_entity label_groups: List[LabelGroup] = [] for group_dict in label_groups_list: - labels: List[LabelEntity] = [ + labels: List[Label] = [ labels_dict[label_id] for label_id in group_dict["label_ids"] ] label_groups.append( LabelGroup( id=group_dict["_id"], name=group_dict["name"], - group_type=LabelGroupType[group_dict["relation_type"]], + group_type=group_dict["relation_type"], labels=labels, ) ) - self._label_schema = LabelSchemaEntity(label_groups=label_groups) + self._label_schema = LabelSchema(label_groups=label_groups) diff --git a/geti_sdk/deployment/deployment.py b/geti_sdk/deployment/deployment.py index 4c584210..88dfdb0a 100644 --- a/geti_sdk/deployment/deployment.py +++ b/geti_sdk/deployment/deployment.py @@ -231,7 +231,7 @@ def _infer_task( :return: Inference result """ model = self._get_model_for_task(task) - return model.infer(image, task, explain) + return model.infer(image, explain) def _infer_pipeline(self, image: np.ndarray, explain: bool = False) -> Prediction: """ diff --git a/geti_sdk/deployment/predictions_postprocessing/postprocessing.py b/geti_sdk/deployment/predictions_postprocessing/postprocessing.py index b6c1630d..79e78a86 100644 --- a/geti_sdk/deployment/predictions_postprocessing/postprocessing.py +++ b/geti_sdk/deployment/predictions_postprocessing/postprocessing.py @@ -68,12 +68,12 @@ class Postprocessor: :param task: Task object containing the task metadata. 
""" - def __init__(self, labels, configuration, task: Task) -> None: + def __init__(self, label_schema, configuration, task: Task) -> None: self.task = task - self.ote_label_schema = labels + self.ote_label_schema = label_schema # Create OTX converter - converter_args = {"labels": labels} + converter_args = {"labels": self.ote_label_schema} if otx.__version__ > "1.2.0": if "use_ellipse_shapes" not in configuration.keys(): configuration.update({"use_ellipse_shapes": False}) @@ -152,14 +152,14 @@ def __call__( else: prediction = Prediction(annotations=[]) - print( - "pre-converter", - postprocessing_results, - "metadata", - metadata, - ) - print("width", width, "height", height) - print("post-converter", prediction) + # print( + # "pre-converter", + # postprocessing_results, + # "metadata", + # metadata, + # ) + # print("width", width, "height", height) + # print("post-converter", prediction) # Empty label is not generated by OTE correctly, append it here if there are # no other predictions diff --git a/geti_sdk/deployment/predictions_postprocessing/services/__init__.py b/geti_sdk/deployment/predictions_postprocessing/services/__init__.py new file mode 100644 index 00000000..5ecbbdac --- /dev/null +++ b/geti_sdk/deployment/predictions_postprocessing/services/__init__.py @@ -0,0 +1,17 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2023 Intel Corporation +# +# This software and the related documents are Intel copyrighted materials, and +# your use of them is governed by the express license under which they were provided to +# you ("License"). Unless the License provides otherwise, you may not use, modify, copy, +# publish, distribute, disclose or transmit this software or the related documents +# without Intel's prior written permission. +# +# This software and the related documents are provided as is, +# with no express or implied warranties, other than those that are expressly stated +# in the License. + +""" +The module contains services for predictions post-processing. +""" diff --git a/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py new file mode 100644 index 00000000..f22650ab --- /dev/null +++ b/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py @@ -0,0 +1,498 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2024 Intel Corporation +# +# This software and the related documents are Intel copyrighted materials, and +# your use of them is governed by the express license under which they were provided to +# you ("License"). Unless the License provides otherwise, you may not use, modify, copy, +# publish, distribute, disclose or transmit this software or the related documents +# without Intel's prior written permission. +# +# This software and the related documents are provided as is, +# with no express or implied warranties, other than those that are expressly stated +# in the License. 
+ +"""Module implements the InferenceResultsToPredictionConverter class.""" + +import abc +from typing import Any, Dict, NamedTuple, Optional, Tuple, Union + +import cv2 +import numpy as np +from openvino.model_api.models.utils import ( + AnomalyResult, + ClassificationResult, + DetectionResult, + ImageResultWithSoftPrediction, + InstanceSegmentationResult, +) + +from geti_sdk.data_models.annotations import Annotation + +# from otx.api.entities.annotation import Annotation +from geti_sdk.data_models.enums.domain import Domain + +# from otx.api.entities.label import Domain +# from otx.api.entities.scored_label import ScoredLabel +from geti_sdk.data_models.label import ScoredLabel +from geti_sdk.data_models.label_schema import LabelSchema +from geti_sdk.data_models.predictions import Prediction +from geti_sdk.data_models.shapes import ( + Ellipse, + Point, + Polygon, + Rectangle, + RotatedRectangle, +) + +# from otx.api.entities.shapes.ellipse import Ellipse +# from otx.api.entities.shapes.polygon import Point, Polygon +# from otx.api.entities.shapes.rectangle import Rectangle +from geti_sdk.deployment.predictions_postprocessing.utils.detection_utils import ( + detection2array, +) +from geti_sdk.deployment.predictions_postprocessing.utils.segmentation_utils import ( + create_annotation_from_segmentation_map, +) + + +class InferenceResultsToPredictionConverter(metaclass=abc.ABCMeta): + """Interface for the converter""" + + @abc.abstractmethod + def convert_to_prediction(self, predictions: NamedTuple, **kwargs) -> Prediction: + """ + Convert raw predictions to Annotation format. + + :param predictions: raw predictions from inference + :return: lisf of annotation objects containing the shapes obtained from the raw predictions. + """ + raise NotImplementedError + + +class ClassificationToPredictionConverter(InferenceResultsToPredictionConverter): + """ + Converts ModelAPI Classification predictions to Annotations. + + :param label_schema: LabelSchema containing the label info of the task + """ + + def __init__(self, label_schema: LabelSchema): + all_labels = label_schema.get_labels(include_empty=True) + # add empty labels if only one non-empty label exits + non_empty_labels = [label for label in all_labels if not label.is_empty] + self.labels = all_labels if len(non_empty_labels) == 1 else non_empty_labels + # get the first empty label + self.empty_label = next((label for label in all_labels if label.is_empty), None) + multilabel = len(label_schema.get_groups(False)) > 1 + multilabel = multilabel and len(label_schema.get_groups(False)) == len( + label_schema.get_labels(include_empty=False) + ) + self.hierarchical = not multilabel and len(label_schema.get_groups(False)) > 1 + + self.label_schema = label_schema + + def convert_to_prediction( + self, predictions: ClassificationResult, image_shape: Tuple[int], **kwargs + ) -> Prediction: # noqa: ARG003 + """ + Convert ModelAPI ClassificationResult predictions to sc_sdk annotations. 
+ + :param predictions: classification labels represented in ModelAPI format (label_index, label_name, confidence) + :return: list of full box annotations objects with corresponding label + """ + labels = [] + for label in predictions.top_labels: + labels.append( + ScoredLabel.from_label(self.labels[label[0]], float(label[-1])) + ) + + if not labels and self.empty_label: + labels = [ScoredLabel.from_label(self.empty_label, probability=1.0)] + + annotations = Annotation( + shape=Rectangle.generate_full_box(image_shape[1], image_shape[0]), + labels=labels, + ) + return Prediction(annotations) + + +class DetectionToPredictionConverter(InferenceResultsToPredictionConverter): + """ + Converts ModelAPI Detection objects to Prediction. + + :param label_schema: LabelSchema containing the label info of the task + :param configuration: optional model configuration setting + """ + + def __init__( + self, label_schema: LabelSchema, configuration: Optional[dict[str, Any]] = None + ): + self.labels = label_schema.get_labels(include_empty=False) + self.label_map = dict(enumerate(self.labels)) + self.use_ellipse_shapes = False + self.confidence_threshold = 0.0 + if configuration is not None: + if "use_ellipse_shapes" in configuration: + self.use_ellipse_shapes = configuration["use_ellipse_shapes"] + if "confidence_threshold" in configuration: + self.confidence_threshold = configuration["confidence_threshold"] + + def convert_to_prediction( + self, predictions: DetectionResult, **kwargs + ) -> Prediction: + """ + Convert ModelAPI DetectionResult predictions to Prediction. + + :param predictions: detection represented in ModelAPI format (label, confidence, x1, y1, x2, y2). + + _Note: + - `label` can be any integer that can be mapped to `self.labels` + - `confidence` should be a value between 0 and 1 + - `x1`, `x2`, `y1` and `y2` are expected to be in pixel + :return: list of annotations object containing the boxes obtained from the prediction + """ + detections = detection2array(predictions.objects) + + annotations = [] + if ( + len(detections) + and detections.shape[1:] < (6,) + or detections.shape[1:] > (7,) + ): + raise ValueError( + f"Shape of prediction is not expected, expected (n, 7) or (n, 6) but got {detections.shape}" + ) + + for detection in detections: + # Some OpenVINO models use an output shape of [7,] + # If this is the case, skip the first value as it is not used + _detection = detection[1:] if detection.shape == (7,) else detection + + label = int(_detection[0]) + confidence = _detection[1] + scored_label = ScoredLabel.from_label(self.label_map[label], confidence) + coords = _detection[2:] + shape: Ellipse | Rectangle + + if confidence < self.confidence_threshold: + continue + + bbox_width = coords[2] - coords[0] + bbox_height = coords[3] - coords[1] + if self.use_ellipse_shapes: + shape = Ellipse(coords[0], coords[1], bbox_width, bbox_height) + else: + shape = Rectangle(coords[0], coords[1], bbox_width, bbox_height) + + annotation = Annotation(shape=shape, labels=[scored_label]) + annotations.append(annotation) + return Prediction(annotations) + + +class RotatedRectToPredictionConverter(DetectionToPredictionConverter): + """ + Converts ModelAPI Rotated Detection objects to Prediction. + + :param label_schema: LabelSchema containing the label info of the task + """ + + def convert_to_prediction( + self, predictions: InstanceSegmentationResult, **kwargs + ) -> Prediction: + """ + Convert ModelAPI instance segmentation predictions to a rotated bounding box annotation format. 
+ + :param predictions: segmentation represented in ModelAPI format + :return: list of annotations containing the rotated boxes obtained from the segmentation contours + :raises ValueError: if metadata is missing from the preprocess step + """ + annotations = [] + if hasattr(predictions, "segmentedObjects"): + predictions = predictions.segmentedObjects + shape: Union[RotatedRectangle, Ellipse] + # for obj in predictions: + for score, class_idx, box, mask in zip(*predictions): + if score < self.confidence_threshold: + continue + if self.use_ellipse_shapes: + shape = Ellipse(box[0], box[1], box[2] - box[0], box[3] - box[1]) + annotations.append( + Annotation( + shape, + labels=[ + ScoredLabel.from_label( + self.labels[int(class_idx) - 1], float(score) + ) + ], + ) + ) + else: + mask = mask.astype(np.uint8) + contours, hierarchies = cv2.findContours( + mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE + ) + if hierarchies is None: + continue + for contour, hierarchy in zip(contours, hierarchies[0]): + if hierarchy[3] != -1: + continue + if len(contour) <= 2 or cv2.contourArea(contour) < 1.0: + continue + points = [ + Point( + x=point[0], + y=point[1], + ) + for point in cv2.boxPoints(cv2.minAreaRect(contour)) + ] + shape = Polygon(points=points) + annotations.append( + Annotation( + shape=RotatedRectangle.from_polygon(shape), + labels=[ + ScoredLabel.from_label( + self.labels[int(class_idx) - 1], float(score) + ) + ], + ) + ) + return Prediction(annotations) + + +class MaskToAnnotationConverter(InferenceResultsToPredictionConverter): + """Converts DetectionBox Predictions ModelAPI to Annotations.""" + + def __init__( + self, label_schema: LabelSchema, configuration: Optional[Dict[str, Any]] = None + ): + self.labels = label_schema.get_labels(include_empty=False) + self.use_ellipse_shapes = False + self.confidence_threshold = 0.0 + if configuration is not None: + if "use_ellipse_shapes" in configuration: + self.use_ellipse_shapes = configuration["use_ellipse_shapes"] + if "confidence_threshold" in configuration: + self.confidence_threshold = configuration["confidence_threshold"] + + def convert_to_prediction( + self, predictions: tuple, **kwargs: Dict[str, Any] + ) -> Prediction: + """ + Convert predictions to Annotation Scene using the metadata. + + :param predictions: Raw predictions from the model. + :return: Prediction object. 
+ """ + annotations = [] + shape: Union[Polygon, Ellipse] + for score, class_idx, box, mask in zip(*predictions): + if score < self.confidence_threshold: + continue + if self.use_ellipse_shapes: + shape = shape = Ellipse( + box[0], box[1], box[2] - box[0], box[3] - box[1] + ) + annotations.append( + Annotation( + shape=shape, + labels=[ + ScoredLabel.from_label( + self.labels[int(class_idx) - 1], float(score) + ) + ], + ) + ) + else: + mask = mask.astype(np.uint8) + contours, hierarchies = cv2.findContours( + mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE + ) + if hierarchies is None: + continue + for contour, hierarchy in zip(contours, hierarchies[0]): + if hierarchy[3] != -1: + continue + if len(contour) <= 2 or cv2.contourArea(contour) < 1.0: + continue + contour = list(contour) + points = [ + Point( + x=point[0][0], + y=point[0][1], + ) + for point in contour + ] + shape = Polygon(points=points) + annotations.append( + Annotation( + shape=shape, + labels=[ + ScoredLabel.from_label( + self.labels[int(class_idx) - 1], float(score) + ) + ], + ) + ) + return Prediction(annotations) + + +class SegmentationToPredictionConverter(InferenceResultsToPredictionConverter): + """ + Converts ModelAPI Segmentation objects to Annotations. + + :param label_schema: LabelSchema containing the label info of the task + """ + + def __init__(self, label_schema: LabelSchema): + self.labels = label_schema.get_labels(include_empty=False) + # NB: index=0 is reserved for the background label + self.label_map = dict(enumerate(self.labels, 1)) + + def convert_to_prediction( + self, predictions: ImageResultWithSoftPrediction, **kwargs # noqa: ARG002 + ) -> Prediction: + """ + Convert ModelAPI instance segmentation predictions to sc_sdk annotations. + + :param predictions: segmentation represented in ModelAPI format + :return: list of annotations object containing the contour polygon obtained from the segmentation + """ + annotations = create_annotation_from_segmentation_map( + hard_prediction=predictions.resultImage, + soft_prediction=predictions.soft_prediction, + label_map=self.label_map, + ) + return Prediction(annotations) + + +class AnomalyToPredictionConverter(InferenceResultsToPredictionConverter): + """ + Convert ModelAPI AnomalyResult predictions to Annotations. + + :param label_schema: LabelSchema containing the label info of the task + """ + + def __init__(self, label_schema: LabelSchema): + self.labels = label_schema.get_labels(include_empty=False) + self.normal_label = next( + label for label in self.labels if not label.is_anomalous + ) + self.anomalous_label = next( + label for label in self.labels if label.is_anomalous + ) + self.domain = self.anomalous_label.domain + + def convert_to_prediction( + self, predictions: AnomalyResult, image_shape: Tuple[int], **kwargs + ) -> Prediction: # noqa: ARG002 + """ + Convert ModelAPI AnomalyResult predictions to sc_sdk annotations. + + :param predictions: anomaly result represented in ModelAPI format (same for all anomaly tasks) + :return: list of annotation objects based on the specific anomaly task: + - Classification: single label (normal or anomalous). + - Segmentation: contour polygon representing the segmentation. + - Detection: predicted bounding boxes. 
+ """ + pred_label = predictions.pred_label + label = self.anomalous_label if pred_label == "Anomalous" else self.normal_label + annotations: list[Annotation] = [] + match self.domain: + case Domain.ANOMALY_CLASSIFICATION: + scored_label = ScoredLabel.from_label( + label=label, probability=float(predictions.pred_score) + ) + annotations = [ + Annotation( + Rectangle.generate_full_box(*image_shape[1::-1]), + labels=[scored_label], + ) + ] + case Domain.ANOMALY_SEGMENTATION: + annotations = create_annotation_from_segmentation_map( + hard_prediction=predictions.pred_mask, + soft_prediction=predictions.anomaly_map.squeeze(), + label_map={0: self.normal_label, 1: self.anomalous_label}, + ) + case Domain.ANOMALY_DETECTION: + for box in predictions.pred_boxes: + annotations.append( + Annotation( + Rectangle(box[0], box[1], box[2] - box[0], box[3] - box[1]), + labels=[ + ScoredLabel.from_label( + label=self.anomalous_label, + probability=predictions.pred_score, + ) + ], + ) + ) + case _: + raise ValueError( + f"Cannot convert predictions for task '{self.domain.name}'. Only Anomaly tasks are supported." + ) + if not annotations: + scored_label = ScoredLabel.from_label( + label=self.normal_label, probability=1.0 + ) + annotations = [ + Annotation( + Rectangle.generate_full_box(*image_shape[1::-1]), + labels=[scored_label], + ) + ] + return Prediction(annotations) + + +class ConverterFactory: + """ + Factory class for creating inference result to prediction converters based on the model's task. + """ + + @staticmethod + def create_converter( + label_schema: LabelSchema, configuration: dict[str, Any] | None = None + ) -> InferenceResultsToPredictionConverter: + """ + Create the appropriate inferencer object according to the model's task. + + :param label_schema: The label schema containing the label info of the task. + :param configuration: Optional configuration for the converter. Defaults to None. + :return: The created inference result to prediction converter. + :raises ValueError: If the task type cannot be determined from the label schema. + """ + domain = ConverterFactory._get_labels_domain(label_schema) + if domain == Domain.CLASSIFICATION: + return ClassificationToPredictionConverter(label_schema) + if domain == Domain.DETECTION: + return DetectionToPredictionConverter(label_schema, configuration) + if domain == Domain.SEGMENTATION: + return SegmentationToPredictionConverter(label_schema) + if domain == Domain.ROTATED_DETECTION: + return RotatedRectToPredictionConverter(label_schema, configuration) + if domain == Domain.INSTANCE_SEGMENTATION: + return MaskToAnnotationConverter(label_schema, configuration) + if domain in ( + Domain.ANOMALY_CLASSIFICATION, + Domain.ANOMALY_SEGMENTATION, + Domain.ANOMALY_DETECTION, + ): + return AnomalyToPredictionConverter(label_schema) + raise ValueError(f"Cannot create inferencer for task type '{domain.name}'.") + + @staticmethod + def _get_labels_domain(label_schema: LabelSchema) -> Domain: + """ + Return the domain (task type) associated with the model's labels. + + :param label_schema: The label schema containing the label info of the task. + :return: The domain of the task. + :raises ValueError: If the task type cannot be determined from the label schema. 
+ """ + try: + return next( + label.domain for label in label_schema.get_labels(include_empty=False) + ) + except StopIteration: + raise ValueError("Cannot determine the task for the model") diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/__init__.py b/geti_sdk/deployment/predictions_postprocessing/utils/__init__.py new file mode 100644 index 00000000..8857252c --- /dev/null +++ b/geti_sdk/deployment/predictions_postprocessing/utils/__init__.py @@ -0,0 +1,17 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2024 Intel Corporation +# +# This software and the related documents are Intel copyrighted materials, and +# your use of them is governed by the express license under which they were provided to +# you ("License"). Unless the License provides otherwise, you may not use, modify, copy, +# publish, distribute, disclose or transmit this software or the related documents +# without Intel's prior written permission. +# +# This software and the related documents are provided as is, +# with no express or implied warranties, other than those that are expressly stated +# in the License. + +""" +The module contains utility functions for post-processing predictions. +""" diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py b/geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py new file mode 100644 index 00000000..768541dd --- /dev/null +++ b/geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py @@ -0,0 +1,87 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2024 Intel Corporation +# +# This software and the related documents are Intel copyrighted materials, and +# your use of them is governed by the express license under which they were provided to +# you ("License"). Unless the License provides otherwise, you may not use, modify, copy, +# publish, distribute, disclose or transmit this software or the related documents +# without Intel's prior written permission. +# +# This software and the related documents are provided as is, +# with no express or implied warranties, other than those that are expressly stated +# in the License. + +import json +import logging + +import numpy as np +from openvino.model_api.models.utils import Detection + +from geti_sdk.data_models.model import Model + +# from sc_sdk.entities.model import Model + +logger = logging.getLogger(__name__) + + +def detection2array(detections: list[Detection]) -> np.ndarray: + """ + Convert list of OpenVINO Detection to a numpy array. + + :param detections: list of OpenVINO Detection containing [score, id, xmin, ymin, xmax, ymax] + :return: numpy array with [label, confidence, x1, y1, x2, y2] + """ + scores = np.empty((0, 1), dtype=np.float32) + labels = np.empty((0, 1), dtype=np.uint32) + boxes = np.empty((0, 4), dtype=np.float32) + for det in detections: + if (det.xmax - det.xmin) * (det.ymax - det.ymin) < 1.0: + continue + scores = np.append(scores, [[det.score]], axis=0) + labels = np.append(labels, [[det.id]], axis=0) + boxes = np.append( + boxes, + [[float(det.xmin), float(det.ymin), float(det.xmax), float(det.ymax)]], + axis=0, + ) + return np.concatenate((labels, scores, boxes), -1) + + +def get_detection_inferencer_configuration(model: Model) -> dict: + """ + Get detection configuration from the model. 
+ + :param model: (Geti) Model to get the detection configuration from + :return: dict representing the detection configuration + """ + config = json.loads(model.get_data("config.json")) + _flatten_config_values(config) + + configuration = {} + if config["postprocessing"].get("result_based_confidence_threshold", False): + configuration["confidence_threshold"] = float( + np.frombuffer(model.get_data("confidence_threshold"), dtype=np.float32)[0] + ) + configuration["use_ellipse_shapes"] = config["postprocessing"].get( + "use_ellipse_shapes", False + ) + + logger.info(f"Detection inferencer configuration: {configuration}") + return configuration + + +def _flatten_config_values(config: dict) -> None: + """ + Extract the "value" field from any nested config. + + Flattening the structure of the config dictionary. The original config dictionary is modified in-place. + + :param config: config dictionary + """ + for key, value in config.items(): + if isinstance(value, dict): + if "value" in value: + config[key] = value["value"] + else: + _flatten_config_values(value) diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py new file mode 100644 index 00000000..d0fbeb76 --- /dev/null +++ b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py @@ -0,0 +1,247 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2024 Intel Corporation +# +# This software and the related documents are Intel copyrighted materials, and +# your use of them is governed by the express license under which they were provided to +# you ("License"). Unless the License provides otherwise, you may not use, modify, copy, +# publish, distribute, disclose or transmit this software or the related documents +# without Intel's prior written permission. +# +# This software and the related documents are provided as is, +# with no express or implied warranties, other than those that are expressly stated +# in the License. +import logging +from copy import copy +from typing import cast + +import cv2 +import numpy as np + +from geti_sdk.data_models.annotations import Annotation +from geti_sdk.data_models.label import ScoredLabel +from geti_sdk.data_models.shapes import Point, Polygon + +# from bson import ObjectId + + +# from otx.api.entities.annotation import Annotation +# from otx.api.entities.id import ID +# from otx.api.entities.scored_label import ScoredLabel +# from otx.api.entities.shapes.polygon import Point, Polygon +# from otx.api.utils.shape_factory import ShapeFactory + +# from sc_sdk.entities.dataset_item import DatasetItem +# from sc_sdk.entities.label import Label + +logger = logging.getLogger(__name__) + +Contour = list[tuple[float, float]] +ContourInternal = list[tuple[float, float] | None] + + +def create_hard_prediction_from_soft_prediction( + soft_prediction: np.ndarray, soft_threshold: float, blur_strength: int = 5 +) -> np.ndarray: + """ + Create a hard prediction containing the final label index per pixel. + + :param soft_prediction: Output from segmentation network. Assumes floating point values, between 0.0 and 1.0. + Can be a 2d-array of shape (height, width) or per-class segmentation logits of shape (height, width, n_classes) + :param soft_threshold: minimum class confidence for each pixel. 
+ The higher the value, the more strict the segmentation is (usually set to 0.5) + :param blur_strength: The higher the value, the smoother the segmentation output will be, but less accurate + :return: numpy array of the hard prediction + """ + soft_prediction_blurred = cv2.blur(soft_prediction, (blur_strength, blur_strength)) + if len(soft_prediction.shape) == 3: + # Apply threshold to filter out `unconfident` predictions, then get max along + # class dimension + soft_prediction_blurred[soft_prediction_blurred < soft_threshold] = 0 + hard_prediction = np.argmax(soft_prediction_blurred, axis=2) + elif len(soft_prediction.shape) == 2: + # In the binary case, simply apply threshold + hard_prediction = soft_prediction_blurred > soft_threshold + else: + raise ValueError( + f"Invalid prediction input of shape {soft_prediction.shape}. " + f"Expected either a 2D or 3D array." + ) + return hard_prediction + + +def get_subcontours(contour: Contour) -> list[Contour]: + """ + Split contour into sub-contours that do not have self intersections. + + :param contour: the contour to split + :return: list of sub-contours + """ + + def find_loops(points: ContourInternal) -> list: + """For each consecutive pair of equivalent rows in the input matrix returns their indices.""" + _, inverse, count = np.unique(points, axis=0, return_inverse=True, return_counts=True) # type: ignore + duplicates = np.where(count > 1)[0] + indices = [] + for x in duplicates: + y = np.nonzero(inverse == x)[0] + for i, _ in enumerate(y[:-1]): + indices.append(y[i : i + 2]) + return indices + + base_contour = cast(ContourInternal, copy(contour)) + + # Make sure that contour is closed. + if not np.array_equal(base_contour[0], base_contour[-1]): # type: ignore + base_contour.append(base_contour[0]) + + subcontours: list[Contour] = [] + loops = sorted(find_loops(base_contour), key=lambda x: x[0], reverse=True) + for loop in loops: + i, j = loop + subcontour = base_contour[i:j] + subcontour = [x for x in subcontour if x is not None] + subcontours.append(cast(Contour, subcontour)) + base_contour[i:j] = [None] * (j - i) + + return [i for i in subcontours if len(i) > 2] + + +def create_annotation_from_segmentation_map( + hard_prediction: np.ndarray, soft_prediction: np.ndarray, label_map: dict +) -> list[Annotation]: + """ + Create polygons from the soft predictions. + + Note: background label will be ignored and not be converted to polygons. + + :param hard_prediction: hard prediction containing the final label index per pixel. + See function `create_hard_prediction_from_soft_prediction`. + :param soft_prediction: soft prediction with shape H x W x N_labels, + where soft_prediction[:, :, 0] is the soft prediction for + background. If soft_prediction is of H x W shape, it is + assumed that this soft prediction will be applied for all + labels. + :param label_map: dictionary mapping labels to an index. It is assumed + that the first item in the dictionary corresponds to the + background label and will therefore be ignored. 
+    :return: list of annotations with polygons
+    """
+    # pylint: disable=too-many-locals
+    height, width = hard_prediction.shape[:2]
+    img_class = hard_prediction.swapaxes(0, 1)
+
+    # pylint: disable=too-many-nested-blocks
+    annotations: list[Annotation] = []
+    for label_index, label in label_map.items():
+        # Skip background
+        if label_index == 0:
+            continue
+
+        # obtain current label soft prediction
+        if len(soft_prediction.shape) == 3:
+            current_label_soft_prediction = soft_prediction[:, :, label_index]
+        else:
+            current_label_soft_prediction = soft_prediction
+
+        obj_group = img_class == label_index
+        label_index_map = (obj_group.T.astype(int) * 255).astype(np.uint8)
+
+        # Contour retrieval mode CCOMP (Connected components) creates a two-level
+        # hierarchy of contours
+        contours, hierarchies = cv2.findContours(
+            label_index_map, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE
+        )
+
+        if hierarchies is not None:
+            for contour, hierarchy in zip(contours, hierarchies[0]):
+                if len(contour) <= 2 or cv2.contourArea(contour) < 1.0:
+                    continue
+
+                if hierarchy[3] == -1:
+                    # In this case a contour does not represent a hole
+                    _contour = [(point[0][0], point[0][1]) for point in contour]
+
+                    # Split contour into subcontours that do not have self intersections.
+                    subcontours = get_subcontours(_contour)
+                    for subcontour in subcontours:
+                        # compute probability of the shape
+                        mask = np.zeros(hard_prediction.shape, dtype=np.uint8)
+                        cv2.drawContours(
+                            mask,
+                            np.asarray([[[x, y]] for x, y in subcontour]),
+                            contourIdx=-1,
+                            color=1,
+                            thickness=-1,
+                        )
+                        probability = cv2.mean(current_label_soft_prediction, mask)[0]
+
+                        # convert the list of points to a closed polygon
+                        points = [Point(x=x, y=y) for x, y in subcontour]
+                        polygon = Polygon(points=points)
+
+                        if polygon.area > 0:
+                            # Contour is a closed polygon with area > 0
+                            annotations.append(
+                                Annotation(
+                                    shape=polygon,
+                                    labels=[ScoredLabel.from_label(label, probability)],
+                                    # id=ID(ObjectId()),
+                                )
+                            )
+                        else:
+                            # Contour is a closed polygon with area == 0
+                            logger.warning(
+                                "The geometry of the segmentation map you are converting "
+                                "is not fully supported. Polygons with an area of zero "
+                                "will be removed.",
+                            )
+                else:
+                    # If contour hierarchy[3] != -1 then contour has a parent and
+                    # therefore is a hole
+                    # Do not allow holes in segmentation masks to be filled silently,
+                    # but trigger warning instead
+                    logger.warning(
+                        "The geometry of the segmentation map you are converting is "
+                        "not fully supported. A hole was found and will be filled.",
+                    )
+    return annotations
+
+
+# def mask_from_annotation(annotations: list[Annotation], labels: list[Label], width: int, height: int) -> np.ndarray:
+#     """
+#     Generate a segmentation mask of a numpy image, and a list of shapes.
+
+#     The mask will be two dimensional and the value of each pixel matches the class
+#     index with offset 1. The background class index is zero. labels[0] matches pixel
+#     value 1, etc. The class index is determined based on the order of `labels`:
+
+#     :param annotations: List of annotations to plot in mask
+#     :param labels: List of labels. The index position of the label determines the class number in the segmentation mask.
+# :param width: Width of the mask +# :param height: Height of the mask +# :return: 2d numpy array of mask +# """ + +# mask = np.zeros(shape=(height, width), dtype=np.uint8) +# for annotation in annotations: +# shape = annotation.shape +# if not isinstance(shape, Polygon): +# shape = ShapeFactory.shape_as_polygon(annotation.shape) +# known_labels = [ +# label for label in annotation.get_labels() if isinstance(label, ScoredLabel) and label.get_label() in labels +# ] +# if len(known_labels) == 0: +# # Skip unknown shapes +# continue + +# label_to_compare = known_labels[0].get_label() + +# class_idx = labels.index(label_to_compare) + 1 +# contour = [] +# for point in shape.points: +# contour.append([int(point.x * width), int(point.y * height)]) + +# mask = cv2.drawContours(mask, np.asarray([contour]), 0, (class_idx, class_idx, class_idx), -1) + +# return np.expand_dims(mask, axis=2) diff --git a/geti_sdk/deployment/utils.py b/geti_sdk/deployment/utils.py index 78307e51..458aafc6 100644 --- a/geti_sdk/deployment/utils.py +++ b/geti_sdk/deployment/utils.py @@ -14,6 +14,7 @@ import re from importlib import resources +from typing import Tuple from pathvalidate import sanitize_filepath @@ -86,3 +87,18 @@ def target_device_is_ovms(device: str) -> bool: r"^((https?://)|(www.))(?:([a-zA-Z]+)|(\d+\.\d+\.\d+\.\d+)):\d{1,5}?$" ) return server_pattern.match(device) is not None + + +def rgb_to_hex(rgb: Tuple[int, int, int]) -> str: + """ + Convert an RGB color value to its corresponding hexadecimal representation. + + :param rgb: A tuple representing the RGB color value, where each element is an integer between 0 and 255. + :return: The hexadecimal representation of the RGB color value. + + _Example: + + >>> rgb_to_hex((255, 0, 0)) + '#ff0000' + """ + return "#{:02x}{:02x}{:02x}".format(rgb[0], rgb[1], rgb[2]) From d6012dc04ce8797577d4ebe28fc16515558f74b8 Mon Sep 17 00:00:00 2001 From: ljcornel Date: Mon, 26 Feb 2024 17:05:07 +0100 Subject: [PATCH 05/22] Make `datasets` optional in Project data model --- geti_sdk/data_models/project.py | 4 +- .../project_client/project_client.py | 49 +++++++++++++++---- 2 files changed, 42 insertions(+), 11 deletions(-) diff --git a/geti_sdk/data_models/project.py b/geti_sdk/data_models/project.py index d2e6e6ef..51f391e1 100644 --- a/geti_sdk/data_models/project.py +++ b/geti_sdk/data_models/project.py @@ -217,7 +217,9 @@ class Project: name: str pipeline: Pipeline - datasets: List[Dataset] + datasets: Optional[List[Dataset]] = ( + None # `datasets` was removed from project listing in Geti v1.15 + ) score: Optional[float] = None # 'score' is removed in v1.1 performance: Optional[Performance] = None creation_time: Optional[str] = attr.field(default=None, converter=str_to_datetime) diff --git a/geti_sdk/rest_clients/project_client/project_client.py b/geti_sdk/rest_clients/project_client/project_client.py index 1cb2d6e9..35dfdf89 100644 --- a/geti_sdk/rest_clients/project_client/project_client.py +++ b/geti_sdk/rest_clients/project_client/project_client.py @@ -63,13 +63,18 @@ def __init__(self, session: GetiSession, workspace_id: str): self.workspace_id = workspace_id self.base_url = f"workspaces/{workspace_id}/" - def get_all_projects(self, request_page_size: int = 50) -> List[Project]: + def get_all_projects( + self, request_page_size: int = 50, get_project_details: bool = True + ) -> List[Project]: """ Return a list of projects found on the Intel® Geti™ server :param request_page_size: Max number of projects to fetch in a single HTTP request. 
Higher values may reduce the response time of this method when there are many projects, but increase the chance of timeout. + :param get_project_details: True to get all details of the projects on the + Intel® Geti™, False to fetch only a summary of each project. Set this to + False if minimizing latency is a concern. Defaults to True :return: List of Project objects, containing the project information for each project on the Intel® Geti™ server """ @@ -81,19 +86,23 @@ def get_all_projects(self, request_page_size: int = 50) -> List[Project]: # The 'projects' endpoint uses pagination: multiple HTTP may be necessary to # fetch the full list of projects - project_list: List[Dict] = [] + project_rest_list: List[Dict] = [] while response := self.session.get_rest_response( - url=f"{self.base_url}projects?limit={request_page_size}&skip={len(project_list)}", + url=f"{self.base_url}projects?limit={request_page_size}&skip={len(project_rest_list)}", method="GET", ): - project_list.extend(response[project_key]) - if len(project_list) >= response[num_total_projects_key]: + project_rest_list.extend(response[project_key]) + if len(project_rest_list) >= response[num_total_projects_key]: break - return [ + project_list = [ ProjectRESTConverter.from_dict(project_input=project) - for project in project_list + for project in project_rest_list ] + if get_project_details: + return [self._get_project_detail(project) for project in project_list] + else: + return project_list def get_project_by_name(self, project_name: str) -> Optional[Project]: """ @@ -103,11 +112,14 @@ def get_project_by_name(self, project_name: str) -> Optional[Project]: :return: Project object containing the data of the project, if the project is found on the server. Returns None if the project doesn't exist """ - project_list = self.get_all_projects() - project = next( + project_list = self.get_all_projects(get_project_details=False) + project_entry = next( (project for project in project_list if project.name == project_name), None ) - return project + if project_entry is not None: + return self._get_project_detail(project_entry) + else: + return None def get_or_create_project( self, @@ -619,3 +631,20 @@ def _await_project_ready( f"Project has not become ready within the specified timeout ({timeout} " f"seconds)." ) from error + + def _get_project_detail(self, project: Union[Project, str]) -> Project: + """ + Fetch the most recent project details from the Intel® Geti™ server + + :param project: Name of the project or Project object representing the project + to get detailed information for. 
+ :return: Updated Project object + """ + if isinstance(project, str): + project = self.get_project_by_name(project_name=project) + return project + else: + response = self.session.get_rest_response( + url=f"{self.base_url}projects/{project.id}", method="GET" + ) + return ProjectRESTConverter.from_dict(response) From 8011b16eae244e74f7573d2c2c4bdf005ac3d39c Mon Sep 17 00:00:00 2001 From: ljcornel Date: Mon, 4 Mar 2024 13:17:30 +0100 Subject: [PATCH 06/22] Fix token auth error handling --- geti_sdk/http_session/geti_session.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/geti_sdk/http_session/geti_session.py b/geti_sdk/http_session/geti_session.py index 83a34422..58cec82d 100644 --- a/geti_sdk/http_session/geti_session.py +++ b/geti_sdk/http_session/geti_session.py @@ -515,9 +515,16 @@ def _handle_error_response( logging.info("Authentication complete.") else: - access_token = self._acquire_access_token() - logging.info("New bearer token obtained.") - self.headers.update({"Authorization": f"Bearer {access_token}"}) + if self.authentication_service == AUTHENTICATION_DEX_OLD: + access_token = self._acquire_access_token() + logging.info("New bearer token obtained.") + self.headers.update({"Authorization": f"Bearer {access_token}"}) + else: + raise ValueError( + "Authentication via your personal access token has failed, " + "most likely the token has expired. Please verify that you " + "have provided a valid token." + ) retry_request = True From cf8b761fde7f042eeef002d5074812ac8d2d5361 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Thu, 7 Mar 2024 09:00:56 +0100 Subject: [PATCH 07/22] otx not used for models from 1.15 Signed-off-by: Igor Davidyuk --- geti_sdk/deployment/deployed_model.py | 11 +++++++ geti_sdk/deployment/deployment.py | 12 ++----- .../prediction_to_annotation_converter.py | 33 +++++++++---------- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 902f7919..b609433a 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -677,3 +677,14 @@ def _parse_label_schema_from_dict( ) ) self._label_schema = LabelSchema(label_groups=label_groups) + + # # This is a workaround for a bug in the label schema for anomaly tasks + # if domain is in [ + # Domain.ANOMALY_CLASSIFICATION, Domain.ANOMALY_DETECTION, Domain.ANOMALY_SEGMENTATION + # ]: + # # For some reason the `is_anomaly` flag is not set correctly in the + # # ote_label_schema, which will break loading the prediction converter. + # # We set the flag here + # for label in model.label_schema.get_labels(include_empty=True): + # if label.name == "Anomalous": + # label.is_anomalous = True diff --git a/geti_sdk/deployment/deployment.py b/geti_sdk/deployment/deployment.py index 88dfdb0a..c68bcad0 100644 --- a/geti_sdk/deployment/deployment.py +++ b/geti_sdk/deployment/deployment.py @@ -146,18 +146,10 @@ def load_inference_models(self, device: str = "CPU"): :param device: Device to load the inference models to (e.g. 
'CPU', 'GPU', 'AUTO', etc) """ - for model, task in zip(self.models, self.project.get_trainable_tasks()): + # for model, task in zip(self.models, self.project.get_trainable_tasks()): + for model in self.models: model.load_inference_model(device=device, project=self.project) - # This is a workaround for a bug in the label schema for anomaly tasks - if task.type.is_anomaly: - # For some reason the `is_anomaly` flag is not set correctly in the - # ote_label_schema, which will break loading the prediction converter. - # We set the flag here - for label in model.ote_label_schema.get_labels(include_empty=True): - if label.name == "Anomalous": - label.is_anomalous = True - self._are_models_loaded = True logging.info(f"Inference models loaded on device `{device}` successfully.") diff --git a/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py index f22650ab..a49371fa 100644 --- a/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py @@ -114,7 +114,7 @@ def convert_to_prediction( shape=Rectangle.generate_full_box(image_shape[1], image_shape[0]), labels=labels, ) - return Prediction(annotations) + return Prediction([annotations]) class DetectionToPredictionConverter(InferenceResultsToPredictionConverter): @@ -208,27 +208,26 @@ def convert_to_prediction( :raises ValueError: if metadata is missing from the preprocess step """ annotations = [] - if hasattr(predictions, "segmentedObjects"): - predictions = predictions.segmentedObjects shape: Union[RotatedRectangle, Ellipse] - # for obj in predictions: - for score, class_idx, box, mask in zip(*predictions): - if score < self.confidence_threshold: + for obj in predictions.segmentedObjects: + if obj.score < self.confidence_threshold: continue if self.use_ellipse_shapes: - shape = Ellipse(box[0], box[1], box[2] - box[0], box[3] - box[1]) + shape = Ellipse( + obj.xmin, obj.ymin, obj.xmax - obj.xmin, obj.ymax - obj.ymin + ) annotations.append( Annotation( shape, labels=[ ScoredLabel.from_label( - self.labels[int(class_idx) - 1], float(score) + self.labels[int(obj.id) - 1], float(obj.score) ) ], ) ) else: - mask = mask.astype(np.uint8) + mask = obj.mask.astype(np.uint8) contours, hierarchies = cv2.findContours( mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE ) @@ -252,7 +251,7 @@ def convert_to_prediction( shape=RotatedRectangle.from_polygon(shape), labels=[ ScoredLabel.from_label( - self.labels[int(class_idx) - 1], float(score) + self.labels[int(obj.id) - 1], float(obj.score) ) ], ) @@ -286,25 +285,25 @@ def convert_to_prediction( """ annotations = [] shape: Union[Polygon, Ellipse] - for score, class_idx, box, mask in zip(*predictions): - if score < self.confidence_threshold: + for obj in predictions.segmentedObjects: + if obj.score < self.confidence_threshold: continue if self.use_ellipse_shapes: shape = shape = Ellipse( - box[0], box[1], box[2] - box[0], box[3] - box[1] + obj.xmin, obj.ymin, obj.xmax - obj.xmin, obj.ymax - obj.ymin ) annotations.append( Annotation( shape=shape, labels=[ ScoredLabel.from_label( - self.labels[int(class_idx) - 1], float(score) + self.labels[int(obj.id) - 1], float(obj.score) ) ], ) ) else: - mask = mask.astype(np.uint8) + mask = obj.mask.astype(np.uint8) contours, hierarchies = cv2.findContours( mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE ) @@ -329,7 
+328,7 @@ def convert_to_prediction( shape=shape, labels=[ ScoredLabel.from_label( - self.labels[int(class_idx) - 1], float(score) + self.labels[int(obj.id) - 1], float(obj.score) ) ], ) @@ -405,7 +404,7 @@ def convert_to_prediction( ) annotations = [ Annotation( - Rectangle.generate_full_box(*image_shape[1::-1]), + shape=Rectangle.generate_full_box(*image_shape[1::-1]), labels=[scored_label], ) ] From 84dd934c57921e7d1c09ae4567e80644a187da3c Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Thu, 7 Mar 2024 09:30:08 +0100 Subject: [PATCH 08/22] replase match case with ifs Signed-off-by: Igor Davidyuk --- .../prediction_to_annotation_converter.py | 61 ++++++++++--------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py index a49371fa..5ad6e509 100644 --- a/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py @@ -397,40 +397,41 @@ def convert_to_prediction( pred_label = predictions.pred_label label = self.anomalous_label if pred_label == "Anomalous" else self.normal_label annotations: list[Annotation] = [] - match self.domain: - case Domain.ANOMALY_CLASSIFICATION: - scored_label = ScoredLabel.from_label( - label=label, probability=float(predictions.pred_score) + if self.domain == Domain.ANOMALY_CLASSIFICATION: + scored_label = ScoredLabel.from_label( + label=label, probability=float(predictions.pred_score) + ) + annotations = [ + Annotation( + shape=Rectangle.generate_full_box(*image_shape[1::-1]), + labels=[scored_label], ) - annotations = [ + ] + elif self.domain == Domain.ANOMALY_SEGMENTATION: + annotations = create_annotation_from_segmentation_map( + hard_prediction=predictions.pred_mask, + soft_prediction=predictions.anomaly_map.squeeze(), + label_map={0: self.normal_label, 1: self.anomalous_label}, + ) + elif self.domain == Domain.ANOMALY_DETECTION: + for box in predictions.pred_boxes: + annotations.append( Annotation( - shape=Rectangle.generate_full_box(*image_shape[1::-1]), - labels=[scored_label], - ) - ] - case Domain.ANOMALY_SEGMENTATION: - annotations = create_annotation_from_segmentation_map( - hard_prediction=predictions.pred_mask, - soft_prediction=predictions.anomaly_map.squeeze(), - label_map={0: self.normal_label, 1: self.anomalous_label}, - ) - case Domain.ANOMALY_DETECTION: - for box in predictions.pred_boxes: - annotations.append( - Annotation( - Rectangle(box[0], box[1], box[2] - box[0], box[3] - box[1]), - labels=[ - ScoredLabel.from_label( - label=self.anomalous_label, - probability=predictions.pred_score, - ) - ], - ) + shape=Rectangle( + box[0], box[1], box[2] - box[0], box[3] - box[1] + ), + labels=[ + ScoredLabel.from_label( + label=self.anomalous_label, + probability=predictions.pred_score, + ) + ], ) - case _: - raise ValueError( - f"Cannot convert predictions for task '{self.domain.name}'. Only Anomaly tasks are supported." ) + else: + raise ValueError( + f"Cannot convert predictions for task '{self.domain.name}'. Only Anomaly tasks are supported." 
+ ) if not annotations: scored_label = ScoredLabel.from_label( label=self.normal_label, probability=1.0 From e09634e44a83fd753c33b7a8ccffe3604215599b Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Thu, 7 Mar 2024 19:34:02 +0100 Subject: [PATCH 09/22] add legacy converter support Signed-off-by: Igor Davidyuk --- geti_sdk/data_models/label.py | 6 +- geti_sdk/deployment/deployed_model.py | 48 +++----- .../predictions_postprocessing/__init__.py | 4 - .../__init__.py | 2 +- .../legacy_converter.py} | 103 ++++++++---------- .../prediction_to_annotation_converter.py | 16 ++- geti_sdk/deployment/utils.py | 26 +++++ 7 files changed, 105 insertions(+), 100 deletions(-) rename geti_sdk/deployment/predictions_postprocessing/{services => results_converter}/__init__.py (85%) rename geti_sdk/deployment/predictions_postprocessing/{postprocessing.py => results_converter/legacy_converter.py} (67%) rename geti_sdk/deployment/predictions_postprocessing/{services => results_converter}/prediction_to_annotation_converter.py (97%) diff --git a/geti_sdk/data_models/label.py b/geti_sdk/data_models/label.py index 15f08993..2004fe5d 100644 --- a/geti_sdk/data_models/label.py +++ b/geti_sdk/data_models/label.py @@ -180,7 +180,11 @@ def from_ote(cls, ote_label: OteScoredLabel) -> "ScoredLabel": name=ote_label.name, id=ote_label.id, probability=ote_label.probability, - color=ote_label.color.hex_str, + color=( + ote_label.color + if isinstance(ote_label.color, str) + else ote_label.color.hex_str + ), ) def to_ote(self) -> OteScoredLabel: diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index b609433a..1fc93025 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -31,19 +31,13 @@ from geti_sdk.data_models import OptimizedModel, Project, TaskConfiguration from geti_sdk.data_models.enums.domain import Domain - -# from geti_sdk.data_models.annotations import Annotation from geti_sdk.data_models.label import Label -from geti_sdk.data_models.label_group import LabelGroup +from geti_sdk.data_models.label_group import LabelGroup, LabelGroupType from geti_sdk.data_models.label_schema import LabelSchema from geti_sdk.data_models.predictions import Prediction, ResultMedium - -# from geti_sdk.deployment.legacy_converters import ( -# AnomalyClassificationToAnnotationConverter, -# ) -from geti_sdk.deployment.predictions_postprocessing.postprocessing import Postprocessor -from geti_sdk.deployment.predictions_postprocessing.services.prediction_to_annotation_converter import ( +from geti_sdk.deployment.predictions_postprocessing.results_converter.prediction_to_annotation_converter import ( ConverterFactory, + InferenceResultsToPredictionConverter, ) from geti_sdk.http_session import GetiSession from geti_sdk.rest_converters import ConfigurationRESTConverter, ModelRESTConverter @@ -51,20 +45,15 @@ from .utils import ( generate_ovms_model_address, generate_ovms_model_name, + get_package_version_from_requirements, rgb_to_hex, target_device_is_ovms, ) -# from otx.algorithms.classification.utils import get_cls_inferencer_configuration -# from otx.api.entities.color import Color -# from otx.api.entities.label import Domain as OTEDomain -# from otx.api.entities.label import LabelEntity -# from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity - - MODEL_DIR_NAME = "model" PYTHON_DIR_NAME = "python" WRAPPER_DIR_NAME = "model_wrappers" +REQUIREMENTS_FILE_NAME = "requirements.txt" LABELS_CONFIG_KEY = "labels" LABEL_TREE_KEY = 
"label_tree" @@ -100,7 +89,7 @@ def __attrs_post_init__(self): self._needs_tempdir_deletion: bool = False self._tempdir_path: Optional[str] = None self._has_custom_model_wrappers: bool = False - # self._label_schema: Optional[LabelSchemaEntity] = None + self._label_schema: Optional[LabelSchema] = None # Attributes related to model explainability self._saliency_key: Optional[str] = None @@ -108,9 +97,7 @@ def __attrs_post_init__(self): self._feature_vector_key: Optional[str] = None self._feature_vector_location: Optional[str] = None - self.openvino_model_parameters: Optional[Dict[str, Any]] = None - - self._postprocessor: Optional[Postprocessor] = None + self._converter: Optional[Union[InferenceResultsToPredictionConverter]] = None @property def model_data_path(self) -> str: @@ -322,17 +309,26 @@ def load_inference_model( f"is required, but could not be found at path " f"{wrapper_module_path}." ) from ex + model = model_api_Model.create_model( model=model_adapter, model_type=model_type, preload=True, + configuration=configuration, ) # self.openvino_model_parameters = configuration self._inference_model = model # Load results to Prediction converter + otx_version = get_package_version_from_requirements( + requirements_path=os.path.join( + self._model_python_path, REQUIREMENTS_FILE_NAME + ), + package_name="otx", + ) + use_leagacy_converter = not otx_version.startswith("1.5") self._converter = ConverterFactory.create_converter( - self.label_schema, configuration + self.label_schema, configuration, use_legacy_converter=use_leagacy_converter ) # TODO: This is a workaround to fix the issue that causes the output blob name @@ -595,14 +591,6 @@ def infer(self, image: np.ndarray, explain: bool = False) -> Prediction: ) postprocessing_results = self._postprocess(inference_results, metadata=metadata) - # Create a postprocessor - # if self._postprocessor is None: - # self._postprocessor = Postprocessor( - # label_schema=self.ote_label_schema, - # configuration=self.openvino_model_parameters, - # task=task, - # ) - # prediction = self._postprocessor(postprocessing_results, image, metadata) prediction = self._converter.convert_to_prediction( postprocessing_results, image_shape=metadata["original_shape"] ) @@ -672,7 +660,7 @@ def _parse_label_schema_from_dict( LabelGroup( id=group_dict["_id"], name=group_dict["name"], - group_type=group_dict["relation_type"], + group_type=LabelGroupType[group_dict["relation_type"]], labels=labels, ) ) diff --git a/geti_sdk/deployment/predictions_postprocessing/__init__.py b/geti_sdk/deployment/predictions_postprocessing/__init__.py index 09552458..a038e0dc 100644 --- a/geti_sdk/deployment/predictions_postprocessing/__init__.py +++ b/geti_sdk/deployment/predictions_postprocessing/__init__.py @@ -15,7 +15,3 @@ """ Predictions postprocessing module. """ - -from .postprocessing import detection2array - -__all__ = ["detection2array"] diff --git a/geti_sdk/deployment/predictions_postprocessing/services/__init__.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/__init__.py similarity index 85% rename from geti_sdk/deployment/predictions_postprocessing/services/__init__.py rename to geti_sdk/deployment/predictions_postprocessing/results_converter/__init__.py index 5ecbbdac..547cb94f 100644 --- a/geti_sdk/deployment/predictions_postprocessing/services/__init__.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/__init__.py @@ -13,5 +13,5 @@ # in the License. """ -The module contains services for predictions post-processing. 
+The module contains classes for inference results post-processing and conversion to internal entities. """ diff --git a/geti_sdk/deployment/predictions_postprocessing/postprocessing.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/legacy_converter.py similarity index 67% rename from geti_sdk/deployment/predictions_postprocessing/postprocessing.py rename to geti_sdk/deployment/predictions_postprocessing/results_converter/legacy_converter.py index 79e78a86..9c51b692 100644 --- a/geti_sdk/deployment/predictions_postprocessing/postprocessing.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/legacy_converter.py @@ -14,80 +14,72 @@ """Module implements the Postprocessor class.""" -from typing import Dict, List, Tuple +from typing import List, Tuple import numpy as np import otx +from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity +from otx.api.entities.model_template import Domain as OteDomain from otx.api.usecases.exportable_code.prediction_to_annotation_converter import ( IPredictionToAnnotationConverter, create_converter, ) from geti_sdk.data_models.annotations import Annotation +from geti_sdk.data_models.enums.domain import Domain from geti_sdk.data_models.enums.task_type import TaskType from geti_sdk.data_models.label import ScoredLabel +from geti_sdk.data_models.label_schema import LabelSchema from geti_sdk.data_models.predictions import Prediction from geti_sdk.data_models.shapes import Polygon, Rectangle, RotatedRectangle -from geti_sdk.data_models.task import Task from geti_sdk.deployment.legacy_converters.legacy_anomaly_converter import ( AnomalyClassificationToAnnotationConverter, ) +from geti_sdk.deployment.predictions_postprocessing.utils.detection_utils import ( + detection2array, +) -def detection2array(detections: List) -> np.ndarray: - """ - Convert list of OpenVINO Detection to a numpy array. - - :param detections: List of OpenVINO Detection containing score, id, xmin, ymin, xmax, ymax - - :return: np.ndarray: numpy array with [label, confidence, x1, y1, x2, y2] - """ - scores = np.empty((0, 1), dtype=np.float32) - labels = np.empty((0, 1), dtype=np.uint32) - boxes = np.empty((0, 4), dtype=np.float32) - for det in detections: - if (det.xmax - det.xmin) * (det.ymax - det.ymin) < 1.0: - continue - scores = np.append(scores, [[det.score]], axis=0) - labels = np.append(labels, [[det.id]], axis=0) - boxes = np.append( - boxes, - [[float(det.xmin), float(det.ymin), float(det.xmax), float(det.ymax)]], - axis=0, - ) - detections = np.concatenate((labels, scores, boxes), -1) - return detections - - -class Postprocessor: +class LegacyConverter: """ - Postprocessor class responsible for converting the output of the model to a Prediction object. + LegacyConverter class responsible for converting the output of the model to a Prediction object. + For models generated with Geti v1.8 and below. :param labels: Label schema to be used for the conversion. :param configuration: Configuration to be used for the conversion. :param task: Task object containing the task metadata. 
""" - def __init__(self, label_schema, configuration, task: Task) -> None: - self.task = task - self.ote_label_schema = label_schema + def __init__( + self, label_schema: LabelSchema, configuration, domain: Domain + ) -> None: + self.domain = domain + self.task_type = TaskType[self.domain.name] + self.label_schema = LabelSchemaEntity( + label_groups=[ + LabelGroup( + name=group.name, + labels=[label.to_ote(self.task_type) for label in group.labels], + group_type=LabelGroupType[group.group_type.name], + id=group.id, + ) + for group in label_schema.get_groups(include_empty=True) + ] + ) # Create OTX converter - converter_args = {"labels": self.ote_label_schema} + converter_args = {"labels": self.label_schema} if otx.__version__ > "1.2.0": if "use_ellipse_shapes" not in configuration.keys(): configuration.update({"use_ellipse_shapes": False}) converter_args["configuration"] = configuration self.converter: IPredictionToAnnotationConverter = create_converter( - converter_type=self.task.type.to_ote_domain(), **converter_args + converter_type=OteDomain[self.domain.name], **converter_args ) - def __call__( - self, - postprocessing_results: List, - image: np.ndarray, - metadata: Dict[str, Tuple[int, int, int]], + def convert_to_prediction( + self, postprocessing_results: List, image_shape: Tuple[int], **kwargs ) -> Prediction: """ Convert the postprocessing results to a Prediction object. @@ -103,15 +95,15 @@ def __call__( # segmentation models if ( hasattr(postprocessing_results, "objects") - and self.task.type == TaskType.DETECTION + and self.domain == Domain.DETECTION ): n_outputs = len(postprocessing_results.objects) postprocessing_results = detection2array(postprocessing_results.objects) elif hasattr( postprocessing_results, "segmentedObjects" - ) and self.task.type in [ - TaskType.INSTANCE_SEGMENTATION, - TaskType.ROTATED_DETECTION, + ) and self.domain in [ + Domain.INSTANCE_SEGMENTATION, + Domain.ROTATED_DETECTION, ]: n_outputs = len(postprocessing_results.segmentedObjects) postprocessing_results = postprocessing_results.segmentedObjects @@ -127,22 +119,24 @@ def __call__( ) # Proceed with postprocessing - width: int = image.shape[1] - height: int = image.shape[0] + width: int = image_shape[1] + height: int = image_shape[0] if n_outputs != 0: try: annotation_scene_entity = self.converter.convert_to_annotation( - predictions=postprocessing_results, metadata=metadata + predictions=postprocessing_results, + metadata={"original_shape": image_shape}, ) except AttributeError: # Add backwards compatibility for anomaly models created in Geti v1.8 and below - if self.task.type.is_anomaly: + if self.domain.is_anomaly: legacy_converter = AnomalyClassificationToAnnotationConverter( - label_schema=self.ote_label_schema + label_schema=self.label_schema ) annotation_scene_entity = legacy_converter.convert_to_annotation( - predictions=postprocessing_results, metadata=metadata + predictions=postprocessing_results, + metadata={"original_shape": image_shape}, ) self.converter = legacy_converter @@ -152,15 +146,6 @@ def __call__( else: prediction = Prediction(annotations=[]) - # print( - # "pre-converter", - # postprocessing_results, - # "metadata", - # metadata, - # ) - # print("width", width, "height", height) - # print("post-converter", prediction) - # Empty label is not generated by OTE correctly, append it here if there are # no other predictions if len(prediction.annotations) == 0: @@ -177,7 +162,7 @@ def __call__( # Rotated detection models produce Polygons, convert them here to # RotatedRectangles - if 
self.task.type == TaskType.ROTATED_DETECTION: + if self.domain == Domain.ROTATED_DETECTION: for annotation in prediction.annotations: if isinstance(annotation.shape, Polygon): annotation.shape = RotatedRectangle.from_polygon(annotation.shape) diff --git a/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py similarity index 97% rename from geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py rename to geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py index 5ad6e509..d50d3373 100644 --- a/geti_sdk/deployment/predictions_postprocessing/services/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py @@ -44,10 +44,9 @@ Rectangle, RotatedRectangle, ) - -# from otx.api.entities.shapes.ellipse import Ellipse -# from otx.api.entities.shapes.polygon import Point, Polygon -# from otx.api.entities.shapes.rectangle import Rectangle +from geti_sdk.deployment.predictions_postprocessing.results_converter.legacy_converter import ( + LegacyConverter, +) from geti_sdk.deployment.predictions_postprocessing.utils.detection_utils import ( detection2array, ) @@ -452,17 +451,24 @@ class ConverterFactory: @staticmethod def create_converter( - label_schema: LabelSchema, configuration: dict[str, Any] | None = None + label_schema: LabelSchema, + configuration: Optional[dict[str, Any]] = None, + use_legacy_converter: bool = False, ) -> InferenceResultsToPredictionConverter: """ Create the appropriate inferencer object according to the model's task. :param label_schema: The label schema containing the label info of the task. :param configuration: Optional configuration for the converter. Defaults to None. + :param use_legacy_converter: Load a legacy converter for models generated by OTX version 1.4. :return: The created inference result to prediction converter. :raises ValueError: If the task type cannot be determined from the label schema. """ domain = ConverterFactory._get_labels_domain(label_schema) + + if use_legacy_converter: + return LegacyConverter(label_schema, configuration, domain) + if domain == Domain.CLASSIFICATION: return ClassificationToPredictionConverter(label_schema) if domain == Domain.DETECTION: diff --git a/geti_sdk/deployment/utils.py b/geti_sdk/deployment/utils.py index 458aafc6..1d972ac0 100644 --- a/geti_sdk/deployment/utils.py +++ b/geti_sdk/deployment/utils.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions # and limitations under the License. +import os import re from importlib import resources +from pathlib import Path from typing import Tuple from pathvalidate import sanitize_filepath @@ -102,3 +104,27 @@ def rgb_to_hex(rgb: Tuple[int, int, int]) -> str: '#ff0000' """ return "#{:02x}{:02x}{:02x}".format(rgb[0], rgb[1], rgb[2]) + + +def get_package_version_from_requirements( + requirements_path: os.PathLike, package_name: str +) -> str: + """ + Get the version of a package from a requirements file. + + :param requirements_path: The requirements file path + :param package_name: The name of the package to get the version of + :return: The version of the package as a string, empty line if the package is not found. + :raises: ValueError If the requirements file is not found. 
+ """ + if not package_name: + return "" + + requirements_path = Path(requirements_path).resolve() + if not requirements_path.exists(): + raise ValueError(f"Requirements file {requirements_path} not found") + + for line in requirements_path.read_text().split("\n"): + if line.startswith(package_name): + return line.split("==")[1].strip() + return "" From fda809cb2a4dabc40d9b08c60b7519c3eff89ed7 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Thu, 7 Mar 2024 19:58:14 +0100 Subject: [PATCH 10/22] fix typing Signed-off-by: Igor Davidyuk --- .../prediction_to_annotation_converter.py | 2 +- .../utils/detection_utils.py | 5 +++-- .../utils/segmentation_utils.py | 10 +++++----- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py index d50d3373..635e4387 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py @@ -452,7 +452,7 @@ class ConverterFactory: @staticmethod def create_converter( label_schema: LabelSchema, - configuration: Optional[dict[str, Any]] = None, + configuration: Optional[Dict[str, Any]] = None, use_legacy_converter: bool = False, ) -> InferenceResultsToPredictionConverter: """ diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py b/geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py index 768541dd..405126c9 100644 --- a/geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py +++ b/geti_sdk/deployment/predictions_postprocessing/utils/detection_utils.py @@ -14,6 +14,7 @@ import json import logging +from typing import Any, Dict, List import numpy as np from openvino.model_api.models.utils import Detection @@ -25,7 +26,7 @@ logger = logging.getLogger(__name__) -def detection2array(detections: list[Detection]) -> np.ndarray: +def detection2array(detections: List[Detection]) -> np.ndarray: """ Convert list of OpenVINO Detection to a numpy array. @@ -71,7 +72,7 @@ def get_detection_inferencer_configuration(model: Model) -> dict: return configuration -def _flatten_config_values(config: dict) -> None: +def _flatten_config_values(config: Dict[str, Any]) -> None: """ Extract the "value" field from any nested config. diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py index d0fbeb76..0b52d12b 100644 --- a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py +++ b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py @@ -13,7 +13,7 @@ # in the License. import logging from copy import copy -from typing import cast +from typing import Dict, List, cast import cv2 import numpy as np @@ -70,7 +70,7 @@ def create_hard_prediction_from_soft_prediction( return hard_prediction -def get_subcontours(contour: Contour) -> list[Contour]: +def get_subcontours(contour: Contour) -> List[Contour]: """ Split contour into sub-contours that do not have self intersections. 
@@ -78,7 +78,7 @@ def get_subcontours(contour: Contour) -> list[Contour]: :return: list of sub-contours """ - def find_loops(points: ContourInternal) -> list: + def find_loops(points: ContourInternal) -> List: """For each consecutive pair of equivalent rows in the input matrix returns their indices.""" _, inverse, count = np.unique(points, axis=0, return_inverse=True, return_counts=True) # type: ignore duplicates = np.where(count > 1)[0] @@ -108,8 +108,8 @@ def find_loops(points: ContourInternal) -> list: def create_annotation_from_segmentation_map( - hard_prediction: np.ndarray, soft_prediction: np.ndarray, label_map: dict -) -> list[Annotation]: + hard_prediction: np.ndarray, soft_prediction: np.ndarray, label_map: Dict +) -> List[Annotation]: """ Create polygons from the soft predictions. From b6665338f7eac9a7ea6518f6d9e22648d5496523 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Fri, 8 Mar 2024 08:44:30 +0100 Subject: [PATCH 11/22] fix typing Signed-off-by: Igor Davidyuk --- .../prediction_to_annotation_converter.py | 6 ++--- .../utils/segmentation_utils.py | 22 +++++-------------- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py index 635e4387..06aaa1cd 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py @@ -15,7 +15,7 @@ """Module implements the InferenceResultsToPredictionConverter class.""" import abc -from typing import Any, Dict, NamedTuple, Optional, Tuple, Union +from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import cv2 import numpy as np @@ -274,7 +274,7 @@ def __init__( self.confidence_threshold = configuration["confidence_threshold"] def convert_to_prediction( - self, predictions: tuple, **kwargs: Dict[str, Any] + self, predictions: Tuple, **kwargs: Dict[str, Any] ) -> Prediction: """ Convert predictions to Annotation Scene using the metadata. @@ -395,7 +395,7 @@ def convert_to_prediction( """ pred_label = predictions.pred_label label = self.anomalous_label if pred_label == "Anomalous" else self.normal_label - annotations: list[Annotation] = [] + annotations: List[Annotation] = [] if self.domain == Domain.ANOMALY_CLASSIFICATION: scored_label = ScoredLabel.from_label( label=label, probability=float(predictions.pred_score) diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py index 0b52d12b..e2784943 100644 --- a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py +++ b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py @@ -13,7 +13,7 @@ # in the License. 
import logging from copy import copy -from typing import Dict, List, cast +from typing import Dict, List, Tuple, cast import cv2 import numpy as np @@ -22,22 +22,10 @@ from geti_sdk.data_models.label import ScoredLabel from geti_sdk.data_models.shapes import Point, Polygon -# from bson import ObjectId - - -# from otx.api.entities.annotation import Annotation -# from otx.api.entities.id import ID -# from otx.api.entities.scored_label import ScoredLabel -# from otx.api.entities.shapes.polygon import Point, Polygon -# from otx.api.utils.shape_factory import ShapeFactory - -# from sc_sdk.entities.dataset_item import DatasetItem -# from sc_sdk.entities.label import Label - logger = logging.getLogger(__name__) -Contour = list[tuple[float, float]] -ContourInternal = list[tuple[float, float] | None] +Contour = List[Tuple[float, float]] +ContourInternal = List[Tuple[float, float] | None] def create_hard_prediction_from_soft_prediction( @@ -95,7 +83,7 @@ def find_loops(points: ContourInternal) -> List: if not np.array_equal(base_contour[0], base_contour[-1]): # type: ignore base_contour.append(base_contour[0]) - subcontours: list[Contour] = [] + subcontours: List[Contour] = [] loops = sorted(find_loops(base_contour), key=lambda x: x[0], reverse=True) for loop in loops: i, j = loop @@ -132,7 +120,7 @@ def create_annotation_from_segmentation_map( img_class = hard_prediction.swapaxes(0, 1) # pylint: disable=too-many-nested-blocks - annotations: list[Annotation] = [] + annotations: List[Annotation] = [] for label_index, label in label_map.items(): # Skip background if label_index == 0: From b8779da1fe7902d63630e3ac921e36b6d4f2c2d1 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Fri, 8 Mar 2024 09:01:43 +0100 Subject: [PATCH 12/22] fix docstrings Signed-off-by: Igor Davidyuk --- .../prediction_to_annotation_converter.py | 39 ++++++++----------- .../utils/segmentation_utils.py | 4 +- 2 files changed, 19 insertions(+), 24 deletions(-) diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py index 06aaa1cd..52cbfe19 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py @@ -28,12 +28,7 @@ ) from geti_sdk.data_models.annotations import Annotation - -# from otx.api.entities.annotation import Annotation from geti_sdk.data_models.enums.domain import Domain - -# from otx.api.entities.label import Domain -# from otx.api.entities.scored_label import ScoredLabel from geti_sdk.data_models.label import ScoredLabel from geti_sdk.data_models.label_schema import LabelSchema from geti_sdk.data_models.predictions import Prediction @@ -61,17 +56,17 @@ class InferenceResultsToPredictionConverter(metaclass=abc.ABCMeta): @abc.abstractmethod def convert_to_prediction(self, predictions: NamedTuple, **kwargs) -> Prediction: """ - Convert raw predictions to Annotation format. + Convert raw predictions to Prediction format. :param predictions: raw predictions from inference - :return: lisf of annotation objects containing the shapes obtained from the raw predictions. + :return: Prediction object containing the shapes obtained from the raw predictions. 
""" raise NotImplementedError class ClassificationToPredictionConverter(InferenceResultsToPredictionConverter): """ - Converts ModelAPI Classification predictions to Annotations. + Converts ModelAPI Classification predictions to Prediction object. :param label_schema: LabelSchema containing the label info of the task """ @@ -95,10 +90,10 @@ def convert_to_prediction( self, predictions: ClassificationResult, image_shape: Tuple[int], **kwargs ) -> Prediction: # noqa: ARG003 """ - Convert ModelAPI ClassificationResult predictions to sc_sdk annotations. + Convert ModelAPI ClassificationResult predictions to Prediction object. :param predictions: classification labels represented in ModelAPI format (label_index, label_name, confidence) - :return: list of full box annotations objects with corresponding label + :return: Prediction object with corresponding label """ labels = [] for label in predictions.top_labels: @@ -118,7 +113,7 @@ def convert_to_prediction( class DetectionToPredictionConverter(InferenceResultsToPredictionConverter): """ - Converts ModelAPI Detection objects to Prediction. + Converts ModelAPI Detection objects to Prediction object. :param label_schema: LabelSchema containing the label info of the task :param configuration: optional model configuration setting @@ -141,7 +136,7 @@ def convert_to_prediction( self, predictions: DetectionResult, **kwargs ) -> Prediction: """ - Convert ModelAPI DetectionResult predictions to Prediction. + Convert ModelAPI DetectionResult predictions to Prediction object. :param predictions: detection represented in ModelAPI format (label, confidence, x1, y1, x2, y2). @@ -149,7 +144,7 @@ def convert_to_prediction( - `label` can be any integer that can be mapped to `self.labels` - `confidence` should be a value between 0 and 1 - `x1`, `x2`, `y1` and `y2` are expected to be in pixel - :return: list of annotations object containing the boxes obtained from the prediction + :return: Prediction object containing the boxes obtained from the prediction """ detections = detection2array(predictions.objects) @@ -172,7 +167,7 @@ def convert_to_prediction( confidence = _detection[1] scored_label = ScoredLabel.from_label(self.label_map[label], confidence) coords = _detection[2:] - shape: Ellipse | Rectangle + shape: Union[Ellipse, Rectangle] if confidence < self.confidence_threshold: continue @@ -203,7 +198,7 @@ def convert_to_prediction( Convert ModelAPI instance segmentation predictions to a rotated bounding box annotation format. :param predictions: segmentation represented in ModelAPI format - :return: list of annotations containing the rotated boxes obtained from the segmentation contours + :return: Prediction object containing the rotated boxes obtained from the segmentation contours :raises ValueError: if metadata is missing from the preprocess step """ annotations = [] @@ -259,7 +254,7 @@ def convert_to_prediction( class MaskToAnnotationConverter(InferenceResultsToPredictionConverter): - """Converts DetectionBox Predictions ModelAPI to Annotations.""" + """Converts DetectionBox Predictions ModelAPI to Prediction object.""" def __init__( self, label_schema: LabelSchema, configuration: Optional[Dict[str, Any]] = None @@ -277,7 +272,7 @@ def convert_to_prediction( self, predictions: Tuple, **kwargs: Dict[str, Any] ) -> Prediction: """ - Convert predictions to Annotation Scene using the metadata. + Convert predictions to Prediction object. :param predictions: Raw predictions from the model. :return: Prediction object. 
@@ -337,7 +332,7 @@ def convert_to_prediction( class SegmentationToPredictionConverter(InferenceResultsToPredictionConverter): """ - Converts ModelAPI Segmentation objects to Annotations. + Converts ModelAPI Segmentation objects to Prediction object. :param label_schema: LabelSchema containing the label info of the task """ @@ -351,10 +346,10 @@ def convert_to_prediction( self, predictions: ImageResultWithSoftPrediction, **kwargs # noqa: ARG002 ) -> Prediction: """ - Convert ModelAPI instance segmentation predictions to sc_sdk annotations. + Convert ModelAPI instance segmentation predictions to Prediction object. :param predictions: segmentation represented in ModelAPI format - :return: list of annotations object containing the contour polygon obtained from the segmentation + :return: Prediction object containing the contour polygon obtained from the segmentation """ annotations = create_annotation_from_segmentation_map( hard_prediction=predictions.resultImage, @@ -366,7 +361,7 @@ def convert_to_prediction( class AnomalyToPredictionConverter(InferenceResultsToPredictionConverter): """ - Convert ModelAPI AnomalyResult predictions to Annotations. + Convert ModelAPI AnomalyResult predictions to Prediction object. :param label_schema: LabelSchema containing the label info of the task """ @@ -388,7 +383,7 @@ def convert_to_prediction( Convert ModelAPI AnomalyResult predictions to sc_sdk annotations. :param predictions: anomaly result represented in ModelAPI format (same for all anomaly tasks) - :return: list of annotation objects based on the specific anomaly task: + :return: Prediction object based on the specific anomaly task: - Classification: single label (normal or anomalous). - Segmentation: contour polygon representing the segmentation. - Detection: predicted bounding boxes. diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py index e2784943..6c09e94a 100644 --- a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py +++ b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py @@ -13,7 +13,7 @@ # in the License. 
import logging from copy import copy -from typing import Dict, List, Tuple, cast +from typing import Dict, List, Optional, Tuple, cast import cv2 import numpy as np @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) Contour = List[Tuple[float, float]] -ContourInternal = List[Tuple[float, float] | None] +ContourInternal = Optional[List[Tuple[float, float]]] def create_hard_prediction_from_soft_prediction( From 5f3f69d8281ce6608f1fba0640b162d00c55e2d8 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Fri, 8 Mar 2024 09:11:49 +0100 Subject: [PATCH 13/22] typing fix Signed-off-by: Igor Davidyuk --- .../results_converter/prediction_to_annotation_converter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py index 52cbfe19..5d074f0d 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py @@ -120,7 +120,7 @@ class DetectionToPredictionConverter(InferenceResultsToPredictionConverter): """ def __init__( - self, label_schema: LabelSchema, configuration: Optional[dict[str, Any]] = None + self, label_schema: LabelSchema, configuration: Optional[Dict[str, Any]] = None ): self.labels = label_schema.get_labels(include_empty=False) self.label_map = dict(enumerate(self.labels)) From 8ae644b9d1019fb03395f448c89c05940ffbea10 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Fri, 8 Mar 2024 10:44:22 +0100 Subject: [PATCH 14/22] fix anomaly converter for otx 1.4 Signed-off-by: Igor Davidyuk --- geti_sdk/data_models/label.py | 1 + geti_sdk/deployment/deployed_model.py | 12 ------------ .../results_converter/legacy_converter.py | 2 +- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/geti_sdk/data_models/label.py b/geti_sdk/data_models/label.py index 2004fe5d..fc959494 100644 --- a/geti_sdk/data_models/label.py +++ b/geti_sdk/data_models/label.py @@ -101,6 +101,7 @@ def to_ote(self, task_type: TaskType) -> LabelEntity: hotkey=self.hotkey, is_empty=self.is_empty, color=Color.from_hex_str(self.color), + is_anomalous=self.is_anomalous, ) def prepare_for_post(self) -> None: diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 1fc93025..7eff40f3 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -316,7 +316,6 @@ def load_inference_model( preload=True, configuration=configuration, ) - # self.openvino_model_parameters = configuration self._inference_model = model # Load results to Prediction converter @@ -665,14 +664,3 @@ def _parse_label_schema_from_dict( ) ) self._label_schema = LabelSchema(label_groups=label_groups) - - # # This is a workaround for a bug in the label schema for anomaly tasks - # if domain is in [ - # Domain.ANOMALY_CLASSIFICATION, Domain.ANOMALY_DETECTION, Domain.ANOMALY_SEGMENTATION - # ]: - # # For some reason the `is_anomaly` flag is not set correctly in the - # # ote_label_schema, which will break loading the prediction converter. 
- # # We set the flag here - # for label in model.label_schema.get_labels(include_empty=True): - # if label.name == "Anomalous": - # label.is_anomalous = True diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/legacy_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/legacy_converter.py index 9c51b692..a2825a84 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/legacy_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/legacy_converter.py @@ -130,7 +130,7 @@ def convert_to_prediction( ) except AttributeError: # Add backwards compatibility for anomaly models created in Geti v1.8 and below - if self.domain.is_anomaly: + if self.domain == Domain.ANOMALY_CLASSIFICATION: legacy_converter = AnomalyClassificationToAnnotationConverter( label_schema=self.label_schema ) From fed6ad16eb047ef56b3ed5ffa07f255d3050a61b Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Fri, 8 Mar 2024 11:42:31 +0100 Subject: [PATCH 15/22] fix anomaly label assignment Signed-off-by: Igor Davidyuk --- .../results_converter/prediction_to_annotation_converter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py index 5d074f0d..6fe61464 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py @@ -389,7 +389,7 @@ def convert_to_prediction( - Detection: predicted bounding boxes. """ pred_label = predictions.pred_label - label = self.anomalous_label if pred_label == "Anomalous" else self.normal_label + label = self.anomalous_label if pred_label == "Anomaly" else self.normal_label annotations: List[Annotation] = [] if self.domain == Domain.ANOMALY_CLASSIFICATION: scored_label = ScoredLabel.from_label( From 8c7827b645fd6657c46b468478c302a6aaada249 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Mon, 11 Mar 2024 09:06:48 +0100 Subject: [PATCH 16/22] converter tests Signed-off-by: Igor Davidyuk --- geti_sdk/deployment/deployed_model.py | 2 +- ....py => results_to_prediction_converter.py} | 8 +- .../utils/segmentation_utils.py | 40 --- tests/fixtures/unit_tests/label.py | 190 +++++++++++++ tests/fixtures/unit_tests/label_schema.py | 75 +++++ .../deployment/test_prediction_converter.py | 259 ++++++++++++++++++ 6 files changed, 531 insertions(+), 43 deletions(-) rename geti_sdk/deployment/predictions_postprocessing/results_converter/{prediction_to_annotation_converter.py => results_to_prediction_converter.py} (99%) create mode 100644 tests/fixtures/unit_tests/label_schema.py create mode 100644 tests/pre-merge/unit/deployment/test_prediction_converter.py diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 7eff40f3..0c7052ac 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -35,7 +35,7 @@ from geti_sdk.data_models.label_group import LabelGroup, LabelGroupType from geti_sdk.data_models.label_schema import LabelSchema from geti_sdk.data_models.predictions import Prediction, ResultMedium -from geti_sdk.deployment.predictions_postprocessing.results_converter.prediction_to_annotation_converter import ( +from 
geti_sdk.deployment.predictions_postprocessing.results_converter.results_to_prediction_converter import ( ConverterFactory, InferenceResultsToPredictionConverter, ) diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/results_to_prediction_converter.py similarity index 99% rename from geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py rename to geti_sdk/deployment/predictions_postprocessing/results_converter/results_to_prediction_converter.py index 6fe61464..fbc56518 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/prediction_to_annotation_converter.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/results_to_prediction_converter.py @@ -212,7 +212,7 @@ def convert_to_prediction( ) annotations.append( Annotation( - shape, + shape=shape, labels=[ ScoredLabel.from_label( self.labels[int(obj.id) - 1], float(obj.score) @@ -389,7 +389,11 @@ def convert_to_prediction( - Detection: predicted bounding boxes. """ pred_label = predictions.pred_label - label = self.anomalous_label if pred_label == "Anomaly" else self.normal_label + label = ( + self.anomalous_label + if pred_label in ("Anomaly", "Anomalous") + else self.normal_label + ) annotations: List[Annotation] = [] if self.domain == Domain.ANOMALY_CLASSIFICATION: scored_label = ScoredLabel.from_label( diff --git a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py index 6c09e94a..9163d48c 100644 --- a/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py +++ b/geti_sdk/deployment/predictions_postprocessing/utils/segmentation_utils.py @@ -116,7 +116,6 @@ def create_annotation_from_segmentation_map( :return: list of annotations with polygons """ # pylint: disable=too-many-locals - height, width = hard_prediction.shape[:2] img_class = hard_prediction.swapaxes(0, 1) # pylint: disable=too-many-nested-blocks @@ -194,42 +193,3 @@ def create_annotation_from_segmentation_map( "not fully supported. A hole was found and will be filled.", ) return annotations - - -# def mask_from_annotation(annotations: list[Annotation], labels: list[Label], width: int, height: int) -> np.ndarray: -# """ -# Generate a segmentation mask of a numpy image, and a list of shapes. - -# The mask is will be two dimensional and the value of each pixel matches the class -# index with offset 1. The background class index is zero. labels[0] matches pixel -# value 1, etc. The class index is determined based on the order of `labels`: - -# :param annotations: List of annotations to plot in mask -# :param labels: List of labels. The index position of the label determines the class number in the segmentation mask. 
-# :param width: Width of the mask -# :param height: Height of the mask -# :return: 2d numpy array of mask -# """ - -# mask = np.zeros(shape=(height, width), dtype=np.uint8) -# for annotation in annotations: -# shape = annotation.shape -# if not isinstance(shape, Polygon): -# shape = ShapeFactory.shape_as_polygon(annotation.shape) -# known_labels = [ -# label for label in annotation.get_labels() if isinstance(label, ScoredLabel) and label.get_label() in labels -# ] -# if len(known_labels) == 0: -# # Skip unknown shapes -# continue - -# label_to_compare = known_labels[0].get_label() - -# class_idx = labels.index(label_to_compare) + 1 -# contour = [] -# for point in shape.points: -# contour.append([int(point.x * width), int(point.y * height)]) - -# mask = cv2.drawContours(mask, np.asarray([contour]), 0, (class_idx, class_idx, class_idx), -1) - -# return np.expand_dims(mask, axis=2) diff --git a/tests/fixtures/unit_tests/label.py b/tests/fixtures/unit_tests/label.py index e65dc7d0..e67746f0 100644 --- a/tests/fixtures/unit_tests/label.py +++ b/tests/fixtures/unit_tests/label.py @@ -12,9 +12,13 @@ # See the License for the specific language governing permissions # and limitations under the License. +from datetime import datetime, timezone + import pytest from geti_sdk.data_models import Label, ScoredLabel +from geti_sdk.data_models.enums.annotation_kind import AnnotationKind +from geti_sdk.data_models.enums.domain import Domain @pytest.fixture() @@ -30,3 +34,189 @@ def fxt_label() -> Label: @pytest.fixture() def fxt_scored_label(fxt_label: Label) -> ScoredLabel: yield ScoredLabel.from_label(label=fxt_label, probability=1.0) + + +class DummyValues: + MEDIA_HEIGHT = 480 + MEDIA_WIDTH = 640 + LABEL_NAMES = ["rectangle", "ellipse", "triangle"] + CREATOR_NAME = "SC SDK Fixtures" + CREATION_DATE = datetime.strptime( + "01/01/1970 00:00:01", "%d/%m/%Y %H:%M:%S" + ).astimezone(timezone.utc) + ANNOTATION_SCENE_KIND = AnnotationKind.ANNOTATION + ANNOTATION_EDITOR_NAME = "editor" + MODIFICATION_DATE = datetime(2021, 7, 15, tzinfo=timezone.utc) + X = 0.375 + Y = 0.25 + WIDTH = 0.25 + HEIGHT = 0.125 + UUID = "92497df6-f45a-11eb-9a03-0242ac130003" + LABEL_PROBABILITY = 1.0 + LABEL_NAME = "dog" + DETECTION_DOMAIN = Domain.DETECTION + ANOMALY_DETECTION_DOMAIN = Domain.ANOMALY_DETECTION + LABEL_HOTKEY = "ctrl+V" + FRAME_INDEX = 0 + + +@pytest.fixture +def fxt_empty_classification_label(): + yield Label( + name="Empty classification label", + id="0", + domain=Domain.CLASSIFICATION, + group="Empty label group", + color="#ff0000", + is_empty=True, + ) + + +@pytest.fixture +def fxt_classification_labels(fxt_empty_classification_label): + yield [ + Label( + name=name, + domain=Domain.CLASSIFICATION, + hotkey=f"CTRL+{index}", + group="Default classification group", + color="#ffdddd", + id=str(index + 1), + is_empty=False, + ) + for index, name in enumerate(DummyValues.LABEL_NAMES) + ] + [fxt_empty_classification_label] + + +@pytest.fixture +def fxt_empty_detection_label(): + yield Label( + name="Empty detection label", + domain=Domain.DETECTION, + hotkey=DummyValues.LABEL_HOTKEY, + color="#ff0000", + id="0", + group="Empty label group", + is_empty=True, + ) + + +@pytest.fixture +def fxt_detection_labels(fxt_empty_detection_label): + yield [ + Label( + name=name, + color="#ff4400", + group="Default detection group", + is_empty=False, + domain=Domain.DETECTION, + id=str(index + 1), + hotkey=f"CTRL+{index}", + ) + for index, name in enumerate(DummyValues.LABEL_NAMES) + ] + [fxt_empty_detection_label] + + 
+@pytest.fixture +def fxt_segmentation_labels(fxt_empty_segmentation_label): + yield [ + Label( + name=name, + color="#ff4400", + group="Default segmentation group", + is_empty=False, + domain=Domain.SEGMENTATION, + id=str(index + 1), + hotkey=f"CTRL+{index}", + ) + for index, name in enumerate(DummyValues.LABEL_NAMES) + ] + [fxt_empty_segmentation_label] + + +@pytest.fixture +def fxt_empty_segmentation_label(): + yield Label( + name="Empty segmentation label", + domain=Domain.SEGMENTATION, + color="#ff0000", + id="0", + hotkey=DummyValues.LABEL_HOTKEY, + group="Empty label group", + is_empty=True, + ) + + +@pytest.fixture +def fxt_rotated_detection_labels(): + yield [ + Label( + name=name, + color="#ff4400", + group="Default rotated detection group", + is_empty=False, + domain=Domain.ROTATED_DETECTION, + hotkey=f"CTRL+{index}", + id=str(index + 1), + ) + for index, name in enumerate(DummyValues.LABEL_NAMES) + ] + + +@pytest.fixture +def fxt_empty_rotated_detection_label(): + yield Label( + name="Empty rotated detection label", + domain=Domain.ROTATED_DETECTION, + hotkey=DummyValues.LABEL_HOTKEY, + id="0", + group="Empty label group", + color="#ff0000", + is_empty=True, + ) + + +@pytest.fixture +def fxt_anomalous_label(): + yield Label( + name=DummyValues.LABEL_NAME, + domain=DummyValues.ANOMALY_DETECTION_DOMAIN, + color="#ff0000", + hotkey=DummyValues.LABEL_HOTKEY, + group="anomal group", + is_empty=False, + is_anomalous=True, + ) + + +@pytest.fixture +def fxt_anomaly_labels_factory(): + def _build_anom_labels(domain: Domain) -> list[Label]: + if domain not in ( + Domain.ANOMALY_CLASSIFICATION, + Domain.ANOMALY_SEGMENTATION, + Domain.ANOMALY_DETECTION, + ): + raise ValueError("This fixtures only generates anomaly labels.") + normal_label = Label( + name="dummy_normal_label", + is_empty=False, + domain=domain, + color="#00BF00", + hotkey=DummyValues.LABEL_HOTKEY, + id="0", + is_anomalous=False, + group="normal group", + ) + anomalous_label = Label( + name="dummy_anomalous_label", + is_empty=False, + domain=domain, + color="#ff0000", + id="1", + hotkey=DummyValues.LABEL_HOTKEY, + group="anomal group", + is_anomalous=True, + ) + return [normal_label, anomalous_label] + + yield _build_anom_labels diff --git a/tests/fixtures/unit_tests/label_schema.py b/tests/fixtures/unit_tests/label_schema.py new file mode 100644 index 00000000..09cb42c4 --- /dev/null +++ b/tests/fixtures/unit_tests/label_schema.py @@ -0,0 +1,75 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2021 Intel Corporation +# +# This software and the related documents are Intel copyrighted materials, and +# your use of them is governed by the express license under which they were provided to +# you ("License"). Unless the License provides otherwise, you may not use, modify, copy, +# publish, distribute, disclose or transmit this software or the related documents +# without Intel's prior written permission. +# +# This software and the related documents are provided as is, +# with no express or implied warranties, other than those that are expressly stated +# in the License. 
+ +import pytest + +from geti_sdk.data_models.enums.domain import Domain +from geti_sdk.data_models.label_group import LabelGroup, LabelGroupType +from geti_sdk.data_models.label_schema import LabelSchema + + +@pytest.fixture +def fxt_label_schema_factory( + fxt_classification_labels, + fxt_detection_labels, + fxt_empty_detection_label, + fxt_segmentation_labels, + fxt_empty_segmentation_label, + fxt_rotated_detection_labels, + fxt_empty_rotated_detection_label, + fxt_anomaly_labels_factory, +): + domain_to_label_properties = { + Domain.DETECTION: { + "labels": fxt_detection_labels, + "empty_label": fxt_empty_detection_label, + }, + Domain.CLASSIFICATION: { + "labels": fxt_classification_labels, + }, + Domain.SEGMENTATION: { + "labels": fxt_segmentation_labels, + "empty_label": fxt_empty_segmentation_label, + }, + Domain.ROTATED_DETECTION: { + "labels": fxt_rotated_detection_labels, + "empty_label": fxt_empty_rotated_detection_label, + }, + Domain.ANOMALY_CLASSIFICATION: { + "labels": fxt_anomaly_labels_factory(Domain.ANOMALY_CLASSIFICATION), + }, + Domain.ANOMALY_SEGMENTATION: { + "labels": fxt_anomaly_labels_factory(Domain.ANOMALY_SEGMENTATION), + }, + Domain.ANOMALY_DETECTION: { + "labels": fxt_anomaly_labels_factory(Domain.ANOMALY_DETECTION), + }, + } + + def _label_schema_factory(domain: Domain): + labels = domain_to_label_properties[domain]["labels"] + empty_label = domain_to_label_properties[domain].get("empty_label", None) + label_groups = [ + LabelGroup(labels=labels, name=f"dummy {domain.name.lower()} label group") + ] + if empty_label is not None: + empty_label_group = LabelGroup( + labels=[empty_label], + name=f"dummy {domain.name.lower()} empty group", + group_type=LabelGroupType.EMPTY_LABEL, + ) + label_groups.append(empty_label_group) + return LabelSchema(label_groups) + + yield _label_schema_factory diff --git a/tests/pre-merge/unit/deployment/test_prediction_converter.py b/tests/pre-merge/unit/deployment/test_prediction_converter.py new file mode 100644 index 00000000..2426ce2c --- /dev/null +++ b/tests/pre-merge/unit/deployment/test_prediction_converter.py @@ -0,0 +1,259 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2024 Intel Corporation +# +# This software and the related documents are Intel copyrighted materials, and +# your use of them is governed by the express license under which they were provided to +# you ("License"). Unless the License provides otherwise, you may not use, modify, copy, +# publish, distribute, disclose or transmit this software or the related documents +# without Intel's prior written permission. +# +# This software and the related documents are provided as is, +# with no express or implied warranties, other than those that are expressly stated +# in the License. 
+import numpy as np +import pytest +from openvino.model_api.models.utils import ( + AnomalyResult, + ClassificationResult, + Detection, + DetectionResult, + ImageResultWithSoftPrediction, + InstanceSegmentationResult, + SegmentedObject, +) + +from geti_sdk.data_models.enums.domain import Domain +from geti_sdk.data_models.label import ScoredLabel +from geti_sdk.data_models.label_group import LabelGroup +from geti_sdk.data_models.label_schema import LabelSchema +from geti_sdk.data_models.shapes import ( + Ellipse, + Point, + Polygon, + Rectangle, + RotatedRectangle, +) +from geti_sdk.deployment.predictions_postprocessing.results_converter.results_to_prediction_converter import ( + AnomalyToPredictionConverter, + ClassificationToPredictionConverter, + DetectionToPredictionConverter, + RotatedRectToPredictionConverter, + SegmentationToPredictionConverter, +) + + +def coords_to_xmin_xmax_width_height(coords): + x1, y1, x2, y2 = coords + return x1, y1, x2 - x1, y2 - y1 + + +@pytest.mark.JobsComponent +class TestInferenceResultsToPredictionConverter: + def test_classification_to_prediction_converter(self, fxt_label_schema_factory): + # Arrange + label_schema = fxt_label_schema_factory(Domain.CLASSIFICATION) + labels = label_schema.get_labels(include_empty=False) + raw_prediction = ClassificationResult( + top_labels=[(1, labels[1].name, 0.81)], + raw_scores=[0.19, 0.81], + saliency_map=None, + feature_vector=None, + ) + + # Act + converter = ClassificationToPredictionConverter(label_schema) + prediction = converter.convert_to_prediction( + raw_prediction, image_shape=(10, 10) + ) + + # Assert + assert converter.labels == labels + assert len(prediction.annotations) == 1 + predicted_label = prediction.annotations[0].labels[0] + assert predicted_label.name == labels[1].name + assert predicted_label.probability == 0.81 + + @pytest.mark.parametrize("use_ellipse_shapes", [True, False]) + def test_detection_to_prediction_converter( + self, use_ellipse_shapes, fxt_label_schema_factory + ): + # Arrange + label_schema = fxt_label_schema_factory(Domain.DETECTION) + labels = label_schema.get_labels(include_empty=False) + coords = [12.0, 41.0, 12.5, 45.5] + raw_prediction = DetectionResult( + objects=[Detection(*coords, score=0.51, id=0)], + saliency_map=None, + feature_vector=None, + ) + + # Act + converter = DetectionToPredictionConverter( + label_schema=label_schema, + configuration={"use_ellipse_shapes": use_ellipse_shapes}, + ) + prediction = converter.convert_to_prediction(raw_prediction) + + # Assert + assert converter.labels == labels + assert len(prediction.annotations) == 1 + if use_ellipse_shapes: + assert prediction.annotations[0].shape == Ellipse( + *coords_to_xmin_xmax_width_height(coords) + ) + else: + assert prediction.annotations[0].shape == Rectangle( + *coords_to_xmin_xmax_width_height(coords) + ) + assert prediction.annotations[0].labels[0] == ScoredLabel.from_label( + labels[0], probability=raw_prediction.objects[0].score + ) + + @pytest.mark.parametrize("use_ellipse_shapes", [True, False]) + def test_rotated_rect_to_prediction_converter( + self, use_ellipse_shapes, fxt_label_schema_factory + ): + # Arrange + label_schema = fxt_label_schema_factory(Domain.ROTATED_DETECTION) + labels = label_schema.get_labels(include_empty=False) + coords = [1, 1, 4, 4] + score = 0.51 + mask = np.array( + [ + [0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 0, 0], + ] + ) + raw_prediction = InstanceSegmentationResult( + segmentedObjects=[ + SegmentedObject(*coords, 
mask=mask, score=score, id=1, str_label="") + ], + saliency_map=None, + feature_vector=None, + ) + height, width = mask.shape + metadata = {"original_shape": (height, width, 3)} + + # Act + converter = RotatedRectToPredictionConverter( + label_schema, configuration={"use_ellipse_shapes": use_ellipse_shapes} + ) + prediction = converter.convert_to_prediction(raw_prediction, metadata=metadata) + + # Assert + assert converter.labels == labels + assert len(prediction.annotations) == 1 + # raise Exception(prediction.annotations[0].labels[0], labels[0]) + # raise Exception(prediction.annotations[0].shape) + if use_ellipse_shapes: + assert prediction.annotations[0].shape == Ellipse( + *coords_to_xmin_xmax_width_height(coords) + ) + else: + assert prediction.annotations[0].shape == RotatedRectangle.from_polygon( + Polygon( + points=[ + Point(x=1, y=1), + Point(x=3, y=1), + Point(x=3, y=3), + Point(x=1, y=3), + ] + ) + ) + assert prediction.annotations[0].labels[0] == ScoredLabel.from_label( + label=labels[0], probability=score + ) + + def test_segmentation_to_prediction_converter(self, fxt_segmentation_labels): + # Arrange + seg_labels = [fxt_segmentation_labels[0]] + label_group = LabelGroup( + labels=seg_labels, name="dummy segmentation label group" + ) + label_schema = LabelSchema(label_groups=[label_group]) + labels = label_schema.get_labels(include_empty=False) + result_image = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [1, 1, 1], + ] + ) + soft_predictions = np.array( + [ + [[0.9, 0.1, 0.1], [0.7, 0.1, 0.2], [0.9, 0.1, 0.1]], + [[0.9, 0.0, 0.1], [0.9, 0.0, 0.1], [0.9, 0.0, 0.0]], + [[0.2, 0.2, 0.6], [0.1, 0.2, 0.7], [0.2, 0.2, 0.6]], + ] + ) + raw_prediction = ImageResultWithSoftPrediction( + resultImage=result_image, + soft_prediction=soft_predictions, + saliency_map=None, + feature_vector=None, + ) + + # Act + converter = SegmentationToPredictionConverter(label_schema) + prediction = converter.convert_to_prediction(raw_prediction) + + # Assert + assert converter.labels == labels + assert len(prediction.annotations) == 1 + assert prediction.annotations[0].labels[0].name == labels[0].name + assert prediction.annotations[0].shape == Polygon( + points=[Point(1.0, 1.0), Point(0.0, 2.0), Point(1.0, 2.0), Point(2.0, 2.0)] + ) + + @pytest.mark.parametrize( + "domain", + [ + Domain.ANOMALY_CLASSIFICATION, + Domain.ANOMALY_SEGMENTATION, + Domain.ANOMALY_DETECTION, + ], + ) + def test_anomaly_to_prediction_converter(self, domain, fxt_label_schema_factory): + # Arrange + label_schema = fxt_label_schema_factory(domain) + labels = label_schema.get_labels(include_empty=False) + anomaly_map = np.ones((2, 2)) + pred_boxes = np.array([[2, 2, 4, 4]]) + pred_mask = np.ones((2, 2)) + raw_prediction = AnomalyResult( + anomaly_map=anomaly_map, + pred_boxes=pred_boxes, + pred_mask=pred_mask, + pred_label="Anomalous", + pred_score=1.0, + ) + + # Act + converter = AnomalyToPredictionConverter(label_schema) + prediction = converter.convert_to_prediction( + raw_prediction, image_shape=anomaly_map.shape + ) + + # Assert + assert converter.labels == labels + assert len(prediction.annotations) == 1 + assert prediction.annotations[0].labels[0] == ScoredLabel.from_label( + next(label for label in labels if label.is_anomalous), probability=1.0 + ) + if domain == Domain.ANOMALY_SEGMENTATION: + assert prediction.annotations[0].shape == Polygon( + points=[ + Point(0.0, 0.0), + Point(0.0, 1.0), + Point(1.0, 1.0), + Point(1.0, 0.0), + ] + ) + elif domain == Domain.ANOMALY_DETECTION: + assert prediction.annotations[0].shape == 
Rectangle( + *coords_to_xmin_xmax_width_height(pred_boxes[0]) + ) From e098b2d4e8c75ed9586902ee62ebe8374257516d Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Mon, 11 Mar 2024 09:18:34 +0100 Subject: [PATCH 17/22] add doc strings to packages Signed-off-by: Igor Davidyuk --- .../predictions_postprocessing/__init__.py | 8 +++++++- .../results_converter/__init__.py | 18 +++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/geti_sdk/deployment/predictions_postprocessing/__init__.py b/geti_sdk/deployment/predictions_postprocessing/__init__.py index a038e0dc..88c5825a 100644 --- a/geti_sdk/deployment/predictions_postprocessing/__init__.py +++ b/geti_sdk/deployment/predictions_postprocessing/__init__.py @@ -13,5 +13,11 @@ # and limitations under the License. """ -Predictions postprocessing module. +Predictions postprocessing package. + +This package provides means for converting inference results obtained from model_api-wrapped OpenVINO models to internal Prediction entities. """ + +from .results_converter import ConverterFactory + +__all__ = ["ConverterFactory"] diff --git a/geti_sdk/deployment/predictions_postprocessing/results_converter/__init__.py b/geti_sdk/deployment/predictions_postprocessing/results_converter/__init__.py index 547cb94f..d4fa29fc 100644 --- a/geti_sdk/deployment/predictions_postprocessing/results_converter/__init__.py +++ b/geti_sdk/deployment/predictions_postprocessing/results_converter/__init__.py @@ -13,5 +13,21 @@ # in the License. """ -The module contains classes for inference results post-processing and conversion to internal entities. +The package contains classes for inference results post-processing and conversion to internal Prediction entities. + +The package contains the following classes: + - `AnomalyToPredictionConverter` - class for converting anomaly classification / segmentation / detection results to internal Prediction entities + - `ClassificationToPredictionConverter` - class for converting classification results to internal Prediction entities + - `DetectionToPredictionConverter` - class for converting detection results to internal Prediction entities + - `MaskToAnnotationConverter` - class for converting rotated detection results to internal Prediction entities + - `RotatedRectToPredictionConverter` - class for converting rotated detection results to internal Prediction entities + - `SegmentationToPredictionConverter` - class for converting segmentation results to internal Prediction entities + + - `LegacyConverter` - OTX based universal converter for models generated with Geti v1.8 and OTX 1.4 + + - `ConverterFactory` - factory class for creating the appropriate converter based on the domain of the inference results """ + +from .results_to_prediction_converter import ConverterFactory + +__all__ = ["ConverterFactory"] From 2e6e995c173f5a8359e50426a9b02d8d6f7ea194 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Mon, 11 Mar 2024 09:43:29 +0100 Subject: [PATCH 18/22] fix typing Signed-off-by: Igor Davidyuk --- tests/fixtures/unit_tests/label.py | 3 ++- .../pre-merge/unit/deployment/test_prediction_converter.py | 7 ++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/fixtures/unit_tests/label.py b/tests/fixtures/unit_tests/label.py index e67746f0..d2d63752 100644 --- a/tests/fixtures/unit_tests/label.py +++ b/tests/fixtures/unit_tests/label.py @@ -13,6 +13,7 @@ # and limitations under the License. 
from datetime import datetime, timezone +from typing import List import pytest @@ -190,7 +191,7 @@ def fxt_anomalous_label(): @pytest.fixture def fxt_anomaly_labels_factory(): - def _build_anom_labels(domain: Domain) -> list[Label]: + def _build_anom_labels(domain: Domain) -> List[Label]: if domain not in ( Domain.ANOMALY_CLASSIFICATION, Domain.ANOMALY_SEGMENTATION, diff --git a/tests/pre-merge/unit/deployment/test_prediction_converter.py b/tests/pre-merge/unit/deployment/test_prediction_converter.py index 2426ce2c..1707e6fc 100644 --- a/tests/pre-merge/unit/deployment/test_prediction_converter.py +++ b/tests/pre-merge/unit/deployment/test_prediction_converter.py @@ -11,6 +11,8 @@ # This software and the related documents are provided as is, # with no express or implied warranties, other than those that are expressly stated # in the License. +from typing import Tuple + import numpy as np import pytest from openvino.model_api.models.utils import ( @@ -43,7 +45,10 @@ ) -def coords_to_xmin_xmax_width_height(coords): +def coords_to_xmin_xmax_width_height( + coords: Tuple[int, int, int, int] +) -> Tuple[int, int, int, int]: + "Convert bbox to xmin, ymin, width, height format" x1, y1, x2, y2 = coords return x1, y1, x2 - x1, y2 - y1 From 3d86dd1f373baf131506f97e6bf8076b771f169b Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Mon, 11 Mar 2024 10:02:28 +0100 Subject: [PATCH 19/22] label timestamp fix Signed-off-by: Igor Davidyuk --- tests/fixtures/unit_tests/label.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fixtures/unit_tests/label.py b/tests/fixtures/unit_tests/label.py index d2d63752..250535ad 100644 --- a/tests/fixtures/unit_tests/label.py +++ b/tests/fixtures/unit_tests/label.py @@ -43,7 +43,7 @@ class DummyValues: LABEL_NAMES = ["rectangle", "ellipse", "triangle"] CREATOR_NAME = "SC SDK Fixtures" CREATION_DATE = datetime.strptime( - "01/01/1970 00:00:01", "%d/%m/%Y %H:%M:%S" + "01-01-1971_00:00:01", "%d-%m-%Y_%H:%M:%S" ).astimezone(timezone.utc) ANNOTATION_SCENE_KIND = AnnotationKind.ANNOTATION ANNOTATION_EDITOR_NAME = "editor" From fc17327a427dea249706365d2db332261d7cf70d Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Mon, 11 Mar 2024 13:10:46 +0300 Subject: [PATCH 20/22] Update geti_sdk/deployment/deployed_model.py Co-authored-by: Ludo Cornelissen --- geti_sdk/deployment/deployed_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 0c7052ac..14f4e036 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -325,7 +325,7 @@ def load_inference_model( ), package_name="otx", ) - use_leagacy_converter = not otx_version.startswith("1.5") + use_legacy_converter = not otx_version.startswith("1.5") self._converter = ConverterFactory.create_converter( self.label_schema, configuration, use_legacy_converter=use_leagacy_converter ) From 17b25e2d195d1546b3bc5a2b9b3e06016ac7f112 Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Mon, 11 Mar 2024 13:11:20 +0300 Subject: [PATCH 21/22] Update geti_sdk/deployment/deployed_model.py Co-authored-by: Ludo Cornelissen --- geti_sdk/deployment/deployed_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 14f4e036..13fa98f0 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -97,7 +97,7 @@ def __attrs_post_init__(self): 
self._feature_vector_key: Optional[str] = None self._feature_vector_location: Optional[str] = None - self._converter: Optional[Union[InferenceResultsToPredictionConverter]] = None + self._converter: Optional[InferenceResultsToPredictionConverter] = None @property def model_data_path(self) -> str: From 9af9b5c64cc32e08f82f569ce6a284c5484a035e Mon Sep 17 00:00:00 2001 From: Igor Davidyuk Date: Mon, 11 Mar 2024 13:17:07 +0300 Subject: [PATCH 22/22] Update deployed_model.py --- geti_sdk/deployment/deployed_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geti_sdk/deployment/deployed_model.py b/geti_sdk/deployment/deployed_model.py index 13fa98f0..4879cff2 100644 --- a/geti_sdk/deployment/deployed_model.py +++ b/geti_sdk/deployment/deployed_model.py @@ -327,7 +327,7 @@ def load_inference_model( ) use_legacy_converter = not otx_version.startswith("1.5") self._converter = ConverterFactory.create_converter( - self.label_schema, configuration, use_legacy_converter=use_leagacy_converter + self.label_schema, configuration, use_legacy_converter=use_legacy_converter ) # TODO: This is a workaround to fix the issue that causes the output blob name
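
For reference, the pieces introduced across these patches fit together roughly as sketched below. This is an illustrative sketch only, not part of the patch series: it mirrors the calls visible in the deployed_model.py hunks and the new unit tests, while the `label_schema` object, the raw inference result, and the requirements-file path are assumptions standing in for values that a loaded deployment would normally provide.

    from geti_sdk.deployment.predictions_postprocessing import ConverterFactory
    from geti_sdk.deployment.utils import get_package_version_from_requirements

    # Models exported with OTX 1.4 (Geti v1.8 and below) take the LegacyConverter path,
    # matching the version check added to DeployedModel.load_inference_model.
    otx_version = get_package_version_from_requirements(
        requirements_path="model/python/requirements.txt",  # placeholder path, assumption
        package_name="otx",
    )
    use_legacy_converter = not otx_version.startswith("1.5")

    # label_schema: the geti_sdk LabelSchema of the deployed task (DeployedModel.label_schema).
    # The configuration key shown here is the one exercised by the new unit tests.
    converter = ConverterFactory.create_converter(
        label_schema,
        configuration={"use_ellipse_shapes": False},
        use_legacy_converter=use_legacy_converter,
    )

    # raw_result: e.g. an openvino.model_api DetectionResult returned by the wrapped model;
    # the converter maps it to a geti_sdk Prediction with Rectangle or Ellipse shapes.
    prediction = converter.convert_to_prediction(raw_result)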