Commit

eval task save avg_time_per_images as result
eunwoosh committed Oct 19, 2023
1 parent ab6a63e commit df41ae6
Showing 2 changed files with 33 additions and 0 deletions.
17 changes: 17 additions & 0 deletions otx/algorithms/classification/adapters/openvino/task.py
@@ -20,6 +20,7 @@
import os
import tempfile
import warnings
import time
from typing import Any, Dict, Optional, Tuple, Union
from zipfile import ZipFile

@@ -175,6 +176,11 @@ def __init__(self, task_environment: TaskEnvironment):
        self.hparams = self.task_environment.get_hyper_parameters(ClassificationConfig)
        self.model = self.task_environment.model
        self.inferencer = self.load_inferencer()
        self._avg_time_per_image = None

    @property
    def avg_time_per_image(self):
        return self._avg_time_per_image

    def load_inferencer(self) -> ClassificationOpenVINOInferencer:
        """load_inferencer function of ClassificationOpenVINOTask."""
@@ -206,7 +212,9 @@ def infer(
            explain_predicted_classes = inference_parameters.explain_predicted_classes

        dataset_size = len(dataset)
        total_time = 0.0
        for i, dataset_item in enumerate(dataset, 1):
            start_time = time.perf_counter()
            predicted_scene, probs, saliency_map, repr_vector, act_score = self.inferencer.predict(dataset_item.numpy)
            item_labels = predicted_scene.annotations[0].get_labels()
            dataset_item.append_labels(item_labels)
@@ -235,7 +243,16 @@ def infer(
                    "Could not find Feature Vector and Saliency Map in OpenVINO output. "
                    "Please rerun OpenVINO export or retrain the model."
                )
            end_time = time.perf_counter() - start_time
            total_time += end_time
            update_progress_callback(int(i / dataset_size * 100))

        self._avg_time_per_image = total_time / dataset_size
        logger.info(f"Avg time per image: {self._avg_time_per_image} secs")
        logger.info(f"Total time: {total_time} secs")
        logger.info("Classification OpenVINO inference completed")

        return dataset

    def explain(
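With this change, a caller can read the measured speed off the task once infer() has run. A minimal sketch (the dataset construction is elided and the helper name is hypothetical; only the infer() call and the avg_time_per_image property come from the diff):

    from otx.algorithms.classification.adapters.openvino.task import ClassificationOpenVINOTask

    def log_inference_speed(task: ClassificationOpenVINOTask, dataset) -> None:
        # hypothetical helper: run inference, then read the timing the task recorded
        task.infer(dataset)
        if task.avg_time_per_image is not None:
            print(f"avg time per image: {task.avg_time_per_image:.4f} s")
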
16 changes: 16 additions & 0 deletions otx/algorithms/segmentation/adapters/openvino/task.py
@@ -18,6 +18,7 @@
import json
import os
import tempfile
import time
from typing import Any, Dict, Optional, Tuple, Union
from zipfile import ZipFile

@@ -168,6 +169,7 @@ def __init__(self, task_environment: TaskEnvironment):
        self.model = self.task_environment.model
        self.model_name = self.task_environment.model_template.model_template_id
        self.inferencer = self.load_inferencer()
        self._avg_time_per_image = None

        labels = task_environment.get_labels(include_empty=False)
        self._label_dictionary = dict(enumerate(labels, 1))
Expand All @@ -179,6 +181,10 @@ def hparams(self):
"""Hparams of OpenVINO Segmentation Task."""
return self.task_environment.get_hyper_parameters(SegmentationConfig)

@property
def avg_time_per_image(self):
return self._avg_time_per_image

def load_inferencer(self) -> OpenVINOSegmentationInferencer:
"""load_inferencer function of OpenVINO Segmentation Task."""
if self.model is None:
Expand All @@ -204,7 +210,9 @@ def infer(
process_soft_prediction = False

dataset_size = len(dataset)
total_time = 0.0
for i, dataset_item in enumerate(dataset, 1):
start_time = time.perf_counter()
predicted_scene, feature_vector, soft_prediction = self.inferencer.predict(dataset_item.numpy)
dataset_item.append_annotations(predicted_scene.annotations)

Expand All @@ -231,8 +239,16 @@ def infer(
)
dataset_item.append_metadata_item(result_media, model=self.model)

end_time = time.perf_counter() - start_time
total_time += end_time
update_progress_callback(int(i / dataset_size * 100), None)


self._avg_time_per_image = total_time / dataset_size
logger.info(f"Avg time per image: {self._avg_time_per_image} secs")
logger.info(f"Total time: {total_time} secs")
logger.info("Segmentation OpenVINO inference completed")

return dataset

def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optional[str] = None):
Expand Down
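Both files add the same measurement idiom: wall-clock each dataset item with time.perf_counter(), accumulate the durations, and divide by the dataset size after the loop. Reduced to a standalone sketch (the function and its names are illustrative, not OTX API):

    import time

    def average_seconds_per_item(process, items):
        # Time each call with the high-resolution perf counter,
        # as the commit does around inferencer.predict()
        total = 0.0
        for item in items:
            start = time.perf_counter()
            process(item)
            total += time.perf_counter() - start
        return total / len(items) if items else None
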
