Skip to content

Commit

Permalink
add check for otx requirement in deployed model
Browse files Browse the repository at this point in the history
Signed-off-by: Igor Davidyuk <igor.davidyuk@intel.com>
  • Loading branch information
igor-davidyuk committed Apr 24, 2024
1 parent dca0ca1 commit 29f5137
Showing 1 changed file with 17 additions and 0 deletions.
17 changes: 17 additions & 0 deletions geti_sdk/deployment/deployed_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
from .utils import (
generate_ovms_model_address,
generate_ovms_model_name,
get_package_version_from_requirements,
rgb_to_hex,
target_device_is_ovms,
)
Expand Down Expand Up @@ -161,6 +162,21 @@ def get_data(self, source: Union[str, os.PathLike, GetiSession]):
)

self._model_python_path = os.path.join(source, PYTHON_DIR_NAME)
# A model is being loaded from disk, check if it is a legacy model.
# We support OTX models starting from version 1.5.0
otx_version = get_package_version_from_requirements(
    requirements_path=os.path.join(
        self._model_python_path, REQUIREMENTS_FILE_NAME
    ),
    package_name="otx",
)
if otx_version:  # Empty string if package not found
    # Compare (major, minor) lexicographically: the original
    # `major <= 1 and minor < 5` check wrongly accepted versions
    # such as 0.9 (minor 9 is not < 5). Tuple comparison handles
    # every pre-1.5 version correctly, including single-component
    # version strings (padded with 0 for a missing minor part).
    parts = otx_version.split(".")
    major = int(parts[0])
    minor = int(parts[1]) if len(parts) > 1 else 0
    if (major, minor) < (1, 5):
        raise ValueError(
            "Model version is not supported. Please use a model trained with "
            "OTX version 1.5.0 or higher."
        )

elif isinstance(source, GetiSession):
if self.base_url is None:
Expand Down Expand Up @@ -276,6 +292,7 @@ def load_inference_model(
)
self._inference_model = model

# Load a Results-to-Prediction converter
self._converter = ConverterFactory.create_converter(
self.label_schema, configuration
)
Expand Down

0 comments on commit 29f5137

Please sign in to comment.