From fdbba2945445c97f44183a9a0004f85def676d45 Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Tue, 12 Jul 2022 04:03:00 +0200 Subject: [PATCH] [Anomaly Task] Move to learning parameters (#1152) * Move to learning paramters * Remove opencv constraints * Remove nncf constraints * Fix key Co-authored-by: Ashwin Vaidya --- .../padim/configuration.yaml | 54 +- .../stfpm/configuration.yaml | 103 ++-- .../stfpm/hpo_config.yaml | 4 +- .../stfpm/template.yaml | 3 +- .../padim/configuration.yaml | 54 +- .../stfpm/configuration.yaml | 103 ++-- .../anomaly_detection/stfpm/hpo_config.yaml | 4 +- .../anomaly_detection/stfpm/template.yaml | 3 +- .../padim/configuration.yaml | 54 +- .../stfpm/configuration.yaml | 103 ++-- .../stfpm/hpo_config.yaml | 4 +- .../anomaly_segmentation/stfpm/template.yaml | 3 +- external/anomaly/constraints.txt | 3 - .../ote_anomalib/configs/anomalib_config.py | 27 +- .../configs/base/configuration.py | 40 +- .../configs/stfpm/configuration.py | 18 +- ...est_ote_anomaly_classification_training.py | 72 +-- .../test_ote_anomaly_segmentation_training.py | 529 +++++++++--------- external/anomaly/tests/test_ote_task.py | 5 +- ote_cli/ote_cli/utils/hpo.py | 14 +- 20 files changed, 600 insertions(+), 600 deletions(-) diff --git a/external/anomaly/configs/anomaly_classification/padim/configuration.yaml b/external/anomaly/configs/anomaly_classification/padim/configuration.yaml index 690e2560d2f..ff0cc759b8e 100644 --- a/external/anomaly/configs/anomaly_classification/padim/configuration.yaml +++ b/external/anomaly/configs/anomaly_classification/padim/configuration.yaml @@ -23,36 +23,12 @@ dataset: value: 8 visible_in_ui: true warning: null - train_batch_size: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 32 - description: - The number of training samples seen in each iteration of training. - Increasing this value improves training time and may make the training more - stable. A larger batch size has higher memory requirements. - editable: true - header: Batch size - max_value: 512 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 32 - visible_in_ui: true - warning: - Increasing this value may cause the system to use more memory than available, - potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true description: Configuration for Padim header: Configuration for Padim id: "" -model: +learning_parameters: backbone: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -74,8 +50,32 @@ model: value: resnet18 visible_in_ui: true warning: null - description: Model Parameters - header: Model Parameters + description: Learning Parameters + header: Learning Parameters + train_batch_size: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 32 + description: + The number of training samples seen in each iteration of training. + Increasing this value improves training time and may make the training more + stable. A larger batch size has higher memory requirements. + editable: true + header: Batch size + max_value: 512 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 32 + visible_in_ui: true + warning: + Increasing this value may cause the system to use more memory than available, + potentially causing out of memory errors, please update with caution. 
type: PARAMETER_GROUP visible_in_ui: true nncf_optimization: diff --git a/external/anomaly/configs/anomaly_classification/stfpm/configuration.yaml b/external/anomaly/configs/anomaly_classification/stfpm/configuration.yaml index 8bcbda3bfa3..b38dfad7270 100644 --- a/external/anomaly/configs/anomaly_classification/stfpm/configuration.yaml +++ b/external/anomaly/configs/anomaly_classification/stfpm/configuration.yaml @@ -23,36 +23,12 @@ dataset: value: 8 visible_in_ui: true warning: null - train_batch_size: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 32 - description: - The number of training samples seen in each iteration of training. - Increasing this value improves training time and may make the training more - stable. A larger batch size has higher memory requirements. - editable: true - header: Batch size - max_value: 512 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 32 - visible_in_ui: true - warning: - Increasing this value may cause the system to use more memory than available, - potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true description: Configuration for STFPM header: Configuration for STFPM id: "" -model: +learning_parameters: backbone: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -74,7 +50,7 @@ model: value: resnet18 visible_in_ui: true warning: null - description: Model Parameters + description: Learning Parameters early_stopping: description: Early Stopping Parameters header: Early Stopping Parameters @@ -125,7 +101,7 @@ model: value too high will increase the training time and might lead to overfitting. type: PARAMETER_GROUP visible_in_ui: true - header: Model Parameters + header: Learning Parameters lr: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -145,6 +121,28 @@ model: value: 0.4 visible_in_ui: true warning: null + max_epochs: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 100 + description: Maximum number of epochs to train the model for. + editable: true + header: Max Epochs + max_value: 500 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 100 + visible_in_ui: true + warning: + Training for very few epochs might lead to poor performance. If Early + Stopping is enabled then increasing the value of max epochs might not lead to + desired result. momentum: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -164,6 +162,30 @@ model: value: 0.9 visible_in_ui: true warning: null + train_batch_size: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 32 + description: + The number of training samples seen in each iteration of training. + Increasing this value improves training time and may make the training more + stable. A larger batch size has higher memory requirements. + editable: true + header: Batch size + max_value: 512 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 32 + visible_in_ui: true + warning: + Increasing this value may cause the system to use more memory than available, + potentially causing out of memory errors, please update with caution. 
type: PARAMETER_GROUP visible_in_ui: true weight_decay: @@ -286,32 +308,5 @@ pot_parameters: warning: null type: PARAMETER_GROUP visible_in_ui: false -trainer: - description: Trainer Parameters - header: Trainer Parameters - max_epochs: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 100 - description: Maximum number of epochs to train the model for. - editable: true - header: Max Epochs - max_value: 500 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 100 - visible_in_ui: true - warning: - Training for very few epochs might lead to poor performance. If Early - Stopping is enabled then increasing the value of max epochs might not lead to - desired result. - type: PARAMETER_GROUP - visible_in_ui: true type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/configs/anomaly_classification/stfpm/hpo_config.yaml b/external/anomaly/configs/anomaly_classification/stfpm/hpo_config.yaml index f5971667df0..7891885c263 100644 --- a/external/anomaly/configs/anomaly_classification/stfpm/hpo_config.yaml +++ b/external/anomaly/configs/anomaly_classification/stfpm/hpo_config.yaml @@ -4,13 +4,13 @@ mode: max search_algorithm: asha early_stop: None hp_space: - model.lr: + learning_parameters.lr: param_type: qloguniform range: - 0.04 - 0.8 - 0.01 - dataset.train_batch_size: + learning_parameters.train_batch_size: param_type: qloguniform range: - 16 diff --git a/external/anomaly/configs/anomaly_classification/stfpm/template.yaml b/external/anomaly/configs/anomaly_classification/stfpm/template.yaml index 8247ed1dad6..fb15667fb1a 100644 --- a/external/anomaly/configs/anomaly_classification/stfpm/template.yaml +++ b/external/anomaly/configs/anomaly_classification/stfpm/template.yaml @@ -20,10 +20,9 @@ entrypoints: hyper_parameters: base_path: ./configuration.yaml parameter_overrides: - dataset: + learning_parameters: train_batch_size: auto_hpo_state: POSSIBLE - model: lr: auto_hpo_state: POSSIBLE diff --git a/external/anomaly/configs/anomaly_detection/padim/configuration.yaml b/external/anomaly/configs/anomaly_detection/padim/configuration.yaml index 690e2560d2f..ff0cc759b8e 100644 --- a/external/anomaly/configs/anomaly_detection/padim/configuration.yaml +++ b/external/anomaly/configs/anomaly_detection/padim/configuration.yaml @@ -23,36 +23,12 @@ dataset: value: 8 visible_in_ui: true warning: null - train_batch_size: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 32 - description: - The number of training samples seen in each iteration of training. - Increasing this value improves training time and may make the training more - stable. A larger batch size has higher memory requirements. - editable: true - header: Batch size - max_value: 512 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 32 - visible_in_ui: true - warning: - Increasing this value may cause the system to use more memory than available, - potentially causing out of memory errors, please update with caution. 
type: PARAMETER_GROUP visible_in_ui: true description: Configuration for Padim header: Configuration for Padim id: "" -model: +learning_parameters: backbone: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -74,8 +50,32 @@ model: value: resnet18 visible_in_ui: true warning: null - description: Model Parameters - header: Model Parameters + description: Learning Parameters + header: Learning Parameters + train_batch_size: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 32 + description: + The number of training samples seen in each iteration of training. + Increasing this value improves training time and may make the training more + stable. A larger batch size has higher memory requirements. + editable: true + header: Batch size + max_value: 512 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 32 + visible_in_ui: true + warning: + Increasing this value may cause the system to use more memory than available, + potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true nncf_optimization: diff --git a/external/anomaly/configs/anomaly_detection/stfpm/configuration.yaml b/external/anomaly/configs/anomaly_detection/stfpm/configuration.yaml index 8bcbda3bfa3..b38dfad7270 100644 --- a/external/anomaly/configs/anomaly_detection/stfpm/configuration.yaml +++ b/external/anomaly/configs/anomaly_detection/stfpm/configuration.yaml @@ -23,36 +23,12 @@ dataset: value: 8 visible_in_ui: true warning: null - train_batch_size: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 32 - description: - The number of training samples seen in each iteration of training. - Increasing this value improves training time and may make the training more - stable. A larger batch size has higher memory requirements. - editable: true - header: Batch size - max_value: 512 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 32 - visible_in_ui: true - warning: - Increasing this value may cause the system to use more memory than available, - potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true description: Configuration for STFPM header: Configuration for STFPM id: "" -model: +learning_parameters: backbone: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -74,7 +50,7 @@ model: value: resnet18 visible_in_ui: true warning: null - description: Model Parameters + description: Learning Parameters early_stopping: description: Early Stopping Parameters header: Early Stopping Parameters @@ -125,7 +101,7 @@ model: value too high will increase the training time and might lead to overfitting. type: PARAMETER_GROUP visible_in_ui: true - header: Model Parameters + header: Learning Parameters lr: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -145,6 +121,28 @@ model: value: 0.4 visible_in_ui: true warning: null + max_epochs: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 100 + description: Maximum number of epochs to train the model for. + editable: true + header: Max Epochs + max_value: 500 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 100 + visible_in_ui: true + warning: + Training for very few epochs might lead to poor performance. 
If Early + Stopping is enabled then increasing the value of max epochs might not lead to + desired result. momentum: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -164,6 +162,30 @@ model: value: 0.9 visible_in_ui: true warning: null + train_batch_size: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 32 + description: + The number of training samples seen in each iteration of training. + Increasing this value improves training time and may make the training more + stable. A larger batch size has higher memory requirements. + editable: true + header: Batch size + max_value: 512 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 32 + visible_in_ui: true + warning: + Increasing this value may cause the system to use more memory than available, + potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true weight_decay: @@ -286,32 +308,5 @@ pot_parameters: warning: null type: PARAMETER_GROUP visible_in_ui: false -trainer: - description: Trainer Parameters - header: Trainer Parameters - max_epochs: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 100 - description: Maximum number of epochs to train the model for. - editable: true - header: Max Epochs - max_value: 500 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 100 - visible_in_ui: true - warning: - Training for very few epochs might lead to poor performance. If Early - Stopping is enabled then increasing the value of max epochs might not lead to - desired result. - type: PARAMETER_GROUP - visible_in_ui: true type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/configs/anomaly_detection/stfpm/hpo_config.yaml b/external/anomaly/configs/anomaly_detection/stfpm/hpo_config.yaml index f5971667df0..7891885c263 100644 --- a/external/anomaly/configs/anomaly_detection/stfpm/hpo_config.yaml +++ b/external/anomaly/configs/anomaly_detection/stfpm/hpo_config.yaml @@ -4,13 +4,13 @@ mode: max search_algorithm: asha early_stop: None hp_space: - model.lr: + learning_parameters.lr: param_type: qloguniform range: - 0.04 - 0.8 - 0.01 - dataset.train_batch_size: + learning_parameters.train_batch_size: param_type: qloguniform range: - 16 diff --git a/external/anomaly/configs/anomaly_detection/stfpm/template.yaml b/external/anomaly/configs/anomaly_detection/stfpm/template.yaml index 2a62af47bb8..74023275a06 100644 --- a/external/anomaly/configs/anomaly_detection/stfpm/template.yaml +++ b/external/anomaly/configs/anomaly_detection/stfpm/template.yaml @@ -20,10 +20,9 @@ entrypoints: hyper_parameters: base_path: ./configuration.yaml parameter_overrides: - dataset: + learning_parameters: train_batch_size: auto_hpo_state: POSSIBLE - model: lr: auto_hpo_state: POSSIBLE diff --git a/external/anomaly/configs/anomaly_segmentation/padim/configuration.yaml b/external/anomaly/configs/anomaly_segmentation/padim/configuration.yaml index 690e2560d2f..ff0cc759b8e 100644 --- a/external/anomaly/configs/anomaly_segmentation/padim/configuration.yaml +++ b/external/anomaly/configs/anomaly_segmentation/padim/configuration.yaml @@ -23,36 +23,12 @@ dataset: value: 8 visible_in_ui: true warning: null - train_batch_size: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 32 - description: - The number of 
training samples seen in each iteration of training. - Increasing this value improves training time and may make the training more - stable. A larger batch size has higher memory requirements. - editable: true - header: Batch size - max_value: 512 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 32 - visible_in_ui: true - warning: - Increasing this value may cause the system to use more memory than available, - potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true description: Configuration for Padim header: Configuration for Padim id: "" -model: +learning_parameters: backbone: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -74,8 +50,32 @@ model: value: resnet18 visible_in_ui: true warning: null - description: Model Parameters - header: Model Parameters + description: Learning Parameters + header: Learning Parameters + train_batch_size: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 32 + description: + The number of training samples seen in each iteration of training. + Increasing this value improves training time and may make the training more + stable. A larger batch size has higher memory requirements. + editable: true + header: Batch size + max_value: 512 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 32 + visible_in_ui: true + warning: + Increasing this value may cause the system to use more memory than available, + potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true nncf_optimization: diff --git a/external/anomaly/configs/anomaly_segmentation/stfpm/configuration.yaml b/external/anomaly/configs/anomaly_segmentation/stfpm/configuration.yaml index 8bcbda3bfa3..b38dfad7270 100644 --- a/external/anomaly/configs/anomaly_segmentation/stfpm/configuration.yaml +++ b/external/anomaly/configs/anomaly_segmentation/stfpm/configuration.yaml @@ -23,36 +23,12 @@ dataset: value: 8 visible_in_ui: true warning: null - train_batch_size: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 32 - description: - The number of training samples seen in each iteration of training. - Increasing this value improves training time and may make the training more - stable. A larger batch size has higher memory requirements. - editable: true - header: Batch size - max_value: 512 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 32 - visible_in_ui: true - warning: - Increasing this value may cause the system to use more memory than available, - potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true description: Configuration for STFPM header: Configuration for STFPM id: "" -model: +learning_parameters: backbone: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -74,7 +50,7 @@ model: value: resnet18 visible_in_ui: true warning: null - description: Model Parameters + description: Learning Parameters early_stopping: description: Early Stopping Parameters header: Early Stopping Parameters @@ -125,7 +101,7 @@ model: value too high will increase the training time and might lead to overfitting. 
type: PARAMETER_GROUP visible_in_ui: true - header: Model Parameters + header: Learning Parameters lr: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -145,6 +121,28 @@ model: value: 0.4 visible_in_ui: true warning: null + max_epochs: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 100 + description: Maximum number of epochs to train the model for. + editable: true + header: Max Epochs + max_value: 500 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 100 + visible_in_ui: true + warning: + Training for very few epochs might lead to poor performance. If Early + Stopping is enabled then increasing the value of max epochs might not lead to + desired result. momentum: affects_outcome_of: NONE auto_hpo_state: not_possible @@ -164,6 +162,30 @@ model: value: 0.9 visible_in_ui: true warning: null + train_batch_size: + affects_outcome_of: TRAINING + auto_hpo_state: not_possible + auto_hpo_value: null + default_value: 32 + description: + The number of training samples seen in each iteration of training. + Increasing this value improves training time and may make the training more + stable. A larger batch size has higher memory requirements. + editable: true + header: Batch size + max_value: 512 + min_value: 1 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 32 + visible_in_ui: true + warning: + Increasing this value may cause the system to use more memory than available, + potentially causing out of memory errors, please update with caution. type: PARAMETER_GROUP visible_in_ui: true weight_decay: @@ -286,32 +308,5 @@ pot_parameters: warning: null type: PARAMETER_GROUP visible_in_ui: false -trainer: - description: Trainer Parameters - header: Trainer Parameters - max_epochs: - affects_outcome_of: TRAINING - auto_hpo_state: not_possible - auto_hpo_value: null - default_value: 100 - description: Maximum number of epochs to train the model for. - editable: true - header: Max Epochs - max_value: 500 - min_value: 1 - type: INTEGER - ui_rules: - action: DISABLE_EDITING - operator: AND - rules: [] - type: UI_RULES - value: 100 - visible_in_ui: true - warning: - Training for very few epochs might lead to poor performance. If Early - Stopping is enabled then increasing the value of max epochs might not lead to - desired result. 
- type: PARAMETER_GROUP - visible_in_ui: true type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/configs/anomaly_segmentation/stfpm/hpo_config.yaml b/external/anomaly/configs/anomaly_segmentation/stfpm/hpo_config.yaml index f5971667df0..7891885c263 100644 --- a/external/anomaly/configs/anomaly_segmentation/stfpm/hpo_config.yaml +++ b/external/anomaly/configs/anomaly_segmentation/stfpm/hpo_config.yaml @@ -4,13 +4,13 @@ mode: max search_algorithm: asha early_stop: None hp_space: - model.lr: + learning_parameters.lr: param_type: qloguniform range: - 0.04 - 0.8 - 0.01 - dataset.train_batch_size: + learning_parameters.train_batch_size: param_type: qloguniform range: - 16 diff --git a/external/anomaly/configs/anomaly_segmentation/stfpm/template.yaml b/external/anomaly/configs/anomaly_segmentation/stfpm/template.yaml index a19a3b09725..75a03ac3bfa 100644 --- a/external/anomaly/configs/anomaly_segmentation/stfpm/template.yaml +++ b/external/anomaly/configs/anomaly_segmentation/stfpm/template.yaml @@ -20,10 +20,9 @@ entrypoints: hyper_parameters: base_path: ./configuration.yaml parameter_overrides: - dataset: + learning_parameters: train_batch_size: auto_hpo_state: POSSIBLE - model: lr: auto_hpo_state: POSSIBLE diff --git a/external/anomaly/constraints.txt b/external/anomaly/constraints.txt index f1102c4c650..a615cf556f6 100644 --- a/external/anomaly/constraints.txt +++ b/external/anomaly/constraints.txt @@ -6,13 +6,10 @@ lxml==4.6.5 matplotlib==3.4.3 networkx~=2.5 nncf==2.2.0 -numpy==1.19.5 omegaconf==2.1.1 onnx==1.10.1 -opencv-python==4.5.3.56 openvino-dev==2022.1.0 pillow==9.0.0 -protobuf==3.14.0 pytorch-lightning==1.5.9 requests==2.26.0 scikit-image==0.17.2 diff --git a/external/anomaly/ote_anomalib/configs/anomalib_config.py b/external/anomaly/ote_anomalib/configs/anomalib_config.py index db081df9a14..9dc50c24271 100644 --- a/external/anomaly/ote_anomalib/configs/anomalib_config.py +++ b/external/anomaly/ote_anomalib/configs/anomalib_config.py @@ -40,6 +40,29 @@ def get_anomalib_config(task_name: str, ote_config: ConfigurableParameters) -> U return anomalib_config +def _anomalib_config_mapper(anomalib_config: Union[DictConfig, ListConfig], ote_config: ConfigurableParameters): + """Returns mapping from learning parameters to anomalib parameters + + Args: + anomalib_config: DictConfig: Anomalib config object + ote_config: ConfigurableParameters: OTE config object parsed from configuration.yaml file + """ + parameters = ote_config.parameters + groups = ote_config.groups + for name in parameters: + if name == "train_batch_size": + anomalib_config.dataset["train_batch_size"] = getattr(ote_config, "train_batch_size") + elif name == "max_epochs": + anomalib_config.trainer["max_epochs"] = getattr(ote_config, "max_epochs") + else: + assert name in anomalib_config.model.keys(), f"Parameter {name} not present in anomalib config." + sc_value = getattr(ote_config, name) + sc_value = sc_value.value if hasattr(sc_value, "value") else sc_value + anomalib_config.model[name] = sc_value + for group in groups: + update_anomalib_config(anomalib_config.model[group], getattr(ote_config, group)) + + def update_anomalib_config(anomalib_config: Union[DictConfig, ListConfig], ote_config: ConfigurableParameters): """ Overwrite the default parameter values in the anomalib config with the values specified in the OTE config. 
The @@ -56,5 +79,7 @@ def update_anomalib_config(anomalib_config: Union[DictConfig, ListConfig], ote_c anomalib_config[param] = sc_value for group in ote_config.groups: # Since pot_parameters and nncf_optimization are specific to OTE - if group not in ["pot_parameters", "nncf_optimization"]: + if group == "learning_parameters": + _anomalib_config_mapper(anomalib_config, getattr(ote_config, "learning_parameters")) + elif group not in ["pot_parameters", "nncf_optimization"]: update_anomalib_config(anomalib_config[group], getattr(ote_config, group)) diff --git a/external/anomaly/ote_anomalib/configs/base/configuration.py b/external/anomaly/ote_anomalib/configs/base/configuration.py index 01c20b5b136..9ca73ddaba2 100644 --- a/external/anomaly/ote_anomalib/configs/base/configuration.py +++ b/external/anomaly/ote_anomalib/configs/base/configuration.py @@ -46,12 +46,10 @@ class BaseAnomalyConfig(ConfigurableParameters): description = header @attrs - class DatasetParameters(ParameterGroup): - """ - Parameters related to dataloader - """ + class LearningParameters(ParameterGroup): + """Parameters that can be tuned using HPO.""" - header = string_attribute("Dataset Parameters") + header = string_attribute("Learning Parameters") description = header train_batch_size = configurable_integer( @@ -67,6 +65,21 @@ class DatasetParameters(ParameterGroup): affects_outcome_of=ModelLifecycle.TRAINING, ) + backbone = selectable( + default_value=ModelBackbone.RESNET18, + header="Model Backbone", + description="Pre-trained backbone used for feature extraction", + ) + + @attrs + class DatasetParameters(ParameterGroup): + """ + Parameters related to dataloader + """ + + header = string_attribute("Dataset Parameters") + description = header + num_workers = configurable_integer( default_value=8, min_value=0, @@ -129,22 +142,7 @@ class NNCFOptimization(ParameterGroup): affects_outcome_of=ModelLifecycle.TRAINING, ) - @attrs - class ModelParameters(ParameterGroup): - """ - Parameter Group for tuning the model - """ - - header = string_attribute("Model Parameters") - description = header - - backbone = selectable( - default_value=ModelBackbone.RESNET18, - header="Model Backbone", - description="Pre-trained backbone used for feature extraction", - ) - - model = add_parameter_group(ModelParameters) + learning_parameters = add_parameter_group(LearningParameters) dataset = add_parameter_group(DatasetParameters) pot_parameters = add_parameter_group(POTParameters) nncf_optimization = add_parameter_group(NNCFOptimization) diff --git a/external/anomaly/ote_anomalib/configs/stfpm/configuration.py b/external/anomaly/ote_anomalib/configs/stfpm/configuration.py index 0295db477fe..dd7b6cf8300 100644 --- a/external/anomaly/ote_anomalib/configs/stfpm/configuration.py +++ b/external/anomaly/ote_anomalib/configs/stfpm/configuration.py @@ -40,10 +40,8 @@ class STFPMAnomalyBaseConfig(BaseAnomalyConfig): description = header @attrs - class ModelParameters(BaseAnomalyConfig.ModelParameters): - """ - Parameter Group for training model - """ + class LearningParameters(BaseAnomalyConfig.LearningParameters): + """Parameters that can be tuned using HPO.""" lr = configurable_float( default_value=0.4, @@ -99,15 +97,6 @@ class EarlyStoppingParameters(ParameterGroup): early_stopping = add_parameter_group(EarlyStoppingParameters) - @attrs - class TrainerParameters(ParameterGroup): - """ - Parameters related to PyTorch Lightning trainer - """ - - header = string_attribute("Trainer Parameters") - description = header - max_epochs = configurable_integer( 
default_value=100, header="Max Epochs", @@ -119,5 +108,4 @@ class TrainerParameters(ParameterGroup): affects_outcome_of=ModelLifecycle.TRAINING, ) - trainer = add_parameter_group(TrainerParameters) - model = add_parameter_group(ModelParameters) + learning_parameters = add_parameter_group(LearningParameters) diff --git a/external/anomaly/tests/test_ote_anomaly_classification_training.py b/external/anomaly/tests/test_ote_anomaly_classification_training.py index 8c20895897d..8e04d26d639 100644 --- a/external/anomaly/tests/test_ote_anomaly_classification_training.py +++ b/external/anomaly/tests/test_ote_anomaly_classification_training.py @@ -13,49 +13,44 @@ # and limitations under the License. import os -from collections import namedtuple, OrderedDict +from collections import OrderedDict from copy import deepcopy - from pprint import pformat from typing import Any, Callable, Dict, List, Optional, Type import pytest +from ote_anomalib.logging import get_logger +from ote_sdk.configuration.helper import create as ote_sdk_configuration_helper_create from ote_sdk.entities.model import ModelEntity -from ote_sdk.entities.datasets import DatasetEntity -from ote_sdk.entities.model_template import parse_model_template -from ote_sdk.entities.label_schema import LabelSchemaEntity +from ote_sdk.entities.model_template import TaskType, parse_model_template from ote_sdk.entities.subset import Subset from ote_sdk.entities.train_parameters import TrainParameters -from ote_sdk.entities.model_template import TaskType - -from ote_anomalib.data.mvtec import OteMvtecDataset -from ote_anomalib.logging import get_logger - -from ote_sdk.configuration.helper import create as ote_sdk_configuration_helper_create -from ote_sdk.test_suite.training_test_case import OTETestCaseInterface, generate_ote_integration_test_case_class from ote_sdk.test_suite.e2e_test_system import DataCollector, e2e_pytest_performance +from ote_sdk.test_suite.training_test_case import ( + OTETestCaseInterface, + generate_ote_integration_test_case_class, +) +from ote_sdk.test_suite.training_tests_actions import ( + OTETestTrainingAction, + create_environment_and_task, +) from ote_sdk.test_suite.training_tests_common import ( - make_path_be_abs, - make_paths_be_abs, - performance_to_score_name_value, KEEP_CONFIG_FIELD_VALUE, REALLIFE_USECASE_CONSTANT, ROOT_PATH_KEY, + make_path_be_abs, + performance_to_score_name_value, ) from ote_sdk.test_suite.training_tests_helper import ( - OTETestHelper, DefaultOTETestCreationParametersInterface, + OTETestHelper, OTETrainingTestInterface, ) -from ote_sdk.test_suite.training_tests_actions import ( - create_environment_and_task, - OTETestTrainingAction, -) from tests.anomaly_common import ( - _get_dataset_params_from_dataset_definitions, _create_anomaly_dataset_and_labels_schema, - get_anomaly_domain_test_action_classes + _get_dataset_params_from_dataset_definitions, + get_anomaly_domain_test_action_classes, ) logger = get_logger(__name__) @@ -69,7 +64,8 @@ def ote_test_domain_fx(): class AnomalyClassificationTrainingTestParameters(DefaultOTETestCreationParametersInterface): def test_case_class(self) -> Type[OTETestCaseInterface]: return generate_ote_integration_test_case_class( - get_anomaly_domain_test_action_classes(AnomalyDetectionTestTrainingAction)) + get_anomaly_domain_test_action_classes(AnomalyDetectionTestTrainingAction) + ) def test_bunches(self) -> List[Dict[str, Any]]: test_bunches = [ @@ -154,17 +150,27 @@ def _run_ote_training(self, data_collector): logger.debug("Set hyperparameters") params = 
ote_sdk_configuration_helper_create(self.model_template.hyper_parameters.data) - if hasattr(params, "model") and hasattr(params.model, "early_stopping"): + if hasattr(params, "learning_parameters") and hasattr(params.learning_parameters, "early_stopping"): if self.num_training_iters != KEEP_CONFIG_FIELD_VALUE: - params.model.early_stopping.patience = int(self.num_training_iters) - logger.debug(f"Set params.model.early_stopping.patience=" f"{params.model.early_stopping.patience}") + params.learning_parameters.early_stopping.patience = int(self.num_training_iters) + logger.debug( + f"Set params.learning_parameters.early_stopping.patience=" + f"{params.learning_parameters.early_stopping.patience}" + ) else: - logger.debug(f"Keep params.model.early_stopping.patience=" f"{params.model.early_stopping.patience}") + logger.debug( + f"Keep params.learning_parameters.early_stopping.patience=" + f"{params.learning_parameters.early_stopping.patience}" + ) if self.batch_size != KEEP_CONFIG_FIELD_VALUE: - params.dataset.train_batch_size = int(self.batch_size) - logger.debug(f"Set params.dataset.train_batch_size=" f"{params.dataset.train_batch_size}") + params.learning_parameters.train_batch_size = int(self.batch_size) + logger.debug( + f"Set params.learning_parameters.train_batch_size=" f"{params.learning_parameters.train_batch_size}" + ) else: - logger.debug(f"Keep params.dataset.train_batch_size=" f"{params.dataset.train_batch_size}") + logger.debug( + f"Keep params.learning_parameters.train_batch_size=" f"{params.learning_parameters.train_batch_size}" + ) logger.debug("Setup environment") self.environment, self.task = create_environment_and_task(params, self.labels_schema, self.model_template) @@ -243,7 +249,8 @@ def _training_params_factory() -> Dict: template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY]) logger.debug("training params factory: Before creating dataset and labels_schema") dataset, labels_schema = _create_anomaly_dataset_and_labels_schema( - dataset_params, dataset_name, TaskType.ANOMALY_CLASSIFICATION) + dataset_params, dataset_name, TaskType.ANOMALY_CLASSIFICATION + ) logger.debug("training params factory: After creating dataset and labels_schema") return { "dataset": dataset, @@ -271,7 +278,8 @@ def _nncf_graph_params_factory() -> Dict: logger.debug("training params factory: Before creating dataset and labels_schema") dataset, labels_schema = _create_anomaly_dataset_and_labels_schema( - dataset_params, dataset_name, TaskType.ANOMALY_CLASSIFICATION) + dataset_params, dataset_name, TaskType.ANOMALY_CLASSIFICATION + ) logger.debug("training params factory: After creating dataset and labels_schema") return { diff --git a/external/anomaly/tests/test_ote_anomaly_segmentation_training.py b/external/anomaly/tests/test_ote_anomaly_segmentation_training.py index fb4ae828a02..e068b9f76c5 100644 --- a/external/anomaly/tests/test_ote_anomaly_segmentation_training.py +++ b/external/anomaly/tests/test_ote_anomaly_segmentation_training.py @@ -20,33 +20,39 @@ from typing import Any, Callable, Dict, List, Optional, Type import pytest -from ote_sdk.entities.model import ModelEntity +from ote_sdk.configuration.helper import create as ote_sdk_configuration_helper_create from ote_sdk.entities.datasets import DatasetEntity -from ote_sdk.entities.model_template import parse_model_template from ote_sdk.entities.label_schema import LabelSchemaEntity +from ote_sdk.entities.model import ModelEntity +from ote_sdk.entities.model_template import TaskType, parse_model_template 
from ote_sdk.entities.subset import Subset from ote_sdk.entities.train_parameters import TrainParameters -from ote_sdk.entities.model_template import TaskType - -from ote_sdk.configuration.helper import create as ote_sdk_configuration_helper_create -from ote_sdk.test_suite.training_test_case import (OTETestCaseInterface, - generate_ote_integration_test_case_class) from ote_sdk.test_suite.e2e_test_system import DataCollector, e2e_pytest_performance -from ote_sdk.test_suite.training_tests_common import (make_path_be_abs, - performance_to_score_name_value, - KEEP_CONFIG_FIELD_VALUE, - REALLIFE_USECASE_CONSTANT, - ROOT_PATH_KEY) -from ote_sdk.test_suite.training_tests_helper import (OTETestHelper, - DefaultOTETestCreationParametersInterface, - OTETrainingTestInterface) -from ote_sdk.test_suite.training_tests_actions import (create_environment_and_task, - OTETestTrainingAction) +from ote_sdk.test_suite.training_test_case import ( + OTETestCaseInterface, + generate_ote_integration_test_case_class, +) +from ote_sdk.test_suite.training_tests_actions import ( + OTETestTrainingAction, + create_environment_and_task, +) +from ote_sdk.test_suite.training_tests_common import ( + KEEP_CONFIG_FIELD_VALUE, + REALLIFE_USECASE_CONSTANT, + ROOT_PATH_KEY, + make_path_be_abs, + performance_to_score_name_value, +) +from ote_sdk.test_suite.training_tests_helper import ( + DefaultOTETestCreationParametersInterface, + OTETestHelper, + OTETrainingTestInterface, +) from tests.anomaly_common import ( - _get_dataset_params_from_dataset_definitions, _create_anomaly_dataset_and_labels_schema, - get_anomaly_domain_test_action_classes + _get_dataset_params_from_dataset_definitions, + get_anomaly_domain_test_action_classes, ) logger = logging.getLogger(__name__) @@ -54,175 +60,176 @@ @pytest.fixture def ote_test_domain_fx(): - return 'custom-anomaly-segmentation' + return "custom-anomaly-segmentation" class AnomalySegmentationTrainingTestParameters(DefaultOTETestCreationParametersInterface): def test_case_class(self) -> Type[OTETestCaseInterface]: return generate_ote_integration_test_case_class( - get_anomaly_domain_test_action_classes(AnomalySegmentationTestTrainingAction)) + get_anomaly_domain_test_action_classes(AnomalySegmentationTestTrainingAction) + ) def test_bunches(self) -> List[Dict[str, Any]]: # Extend with other datasets test_bunches = [ - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_bottle', - usecase='precommit', - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_bottle', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_cable', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_capsule', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_carpet', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - 
model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_grid', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_hazelnut', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_leather', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_metal_nut', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_pill', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_screw', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_tile', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_toothbrush', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_transistor', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_wood', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - dict( - model_name=[ - 'ote_anomaly_segmentation_padim', - 'ote_anomaly_segmentation_stfpm', - ], - dataset_name='mvtec_short_zipper', - patience=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_bottle", + usecase="precommit", + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_bottle", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_cable", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + 
dataset_name="mvtec_short_capsule", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_carpet", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_grid", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_hazelnut", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_leather", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_metal_nut", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_pill", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_screw", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_tile", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_toothbrush", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_transistor", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_wood", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + dict( + model_name=[ + "ote_anomaly_segmentation_padim", + "ote_anomaly_segmentation_stfpm", + ], + dataset_name="mvtec_short_zipper", + patience=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), ] return deepcopy(test_bunches) @@ -260,7 +267,12 @@ class AnomalySegmentationTestTrainingAction(OTETestTrainingAction): _name = "training" def __init__( - self, dataset: DatasetEntity, labels_schema:LabelSchemaEntity(), template_path:str, patience:str, batch_size:str + self, + dataset: DatasetEntity, + labels_schema: LabelSchemaEntity(), + template_path: 
str,
+        patience: str,
+        batch_size: str,
     ):
         self.dataset = dataset
         self.labels_schema = labels_schema
@@ -278,46 +290,37 @@ def _run_ote_training(self, data_collector: DataCollector):
         logger.debug(f"self.template_path = {self.template_path}")

         print(f"train dataset: {len(self.dataset.get_subset(Subset.TRAINING))} items")
-        print(
-            f"validation dataset: "
-            f"{len(self.dataset.get_subset(Subset.VALIDATION))} items"
-        )
+        print(f"validation dataset: " f"{len(self.dataset.get_subset(Subset.VALIDATION))} items")

         logger.debug("Load model template")
         self.model_template = parse_model_template(self.template_path)

         logger.debug("Set hyperparameters")
-        params = ote_sdk_configuration_helper_create(
-            self.model_template.hyper_parameters.data
-        )
-        if hasattr(params, "model") and hasattr(params.model, "early_stopping"):
+        params = ote_sdk_configuration_helper_create(self.model_template.hyper_parameters.data)
+        if hasattr(params, "learning_parameters") and hasattr(params.learning_parameters, "early_stopping"):
             if self.num_training_iters != KEEP_CONFIG_FIELD_VALUE:
-                params.model.early_stopping.patience = int(self.num_training_iters)
+                params.learning_parameters.early_stopping.patience = int(self.num_training_iters)
                 logger.debug(
-                    f"Set params.model.early_stopping.patience="
-                    f"{params.model.early_stopping.patience}"
+                    f"Set params.learning_parameters.early_stopping.patience="
+                    f"{params.learning_parameters.early_stopping.patience}"
                 )
             else:
                 logger.debug(
-                    f"Keep params.model.early_stopping.patience="
-                    f"{params.model.early_stopping.patience}"
+                    f"Keep params.learning_parameters.early_stopping.patience="
+                    f"{params.learning_parameters.early_stopping.patience}"
                 )

         if self.batch_size != KEEP_CONFIG_FIELD_VALUE:
-            params.dataset.train_batch_size = int(self.batch_size)
+            params.learning_parameters.train_batch_size = int(self.batch_size)
             logger.debug(
-                f"Set params.dataset.train_batch_size="
-                f"{params.dataset.train_batch_size}"
+                f"Set params.learning_parameters.train_batch_size=" f"{params.learning_parameters.train_batch_size}"
             )
         else:
             logger.debug(
-                f"Keep params.dataset.train_batch_size="
-                f"{params.dataset.train_batch_size}"
+                f"Keep params.learning_parameters.train_batch_size=" f"{params.learning_parameters.train_batch_size}"
             )

         logger.debug("Setup environment")
-        self.environment, self.task = create_environment_and_task(
-            params, self.labels_schema, self.model_template
-        )
+        self.environment, self.task = create_environment_and_task(params, self.labels_schema, self.model_template)

         logger.debug("Train model")
         self.output_model = ModelEntity(
@@ -353,7 +356,8 @@ class TestOTEReallifeAnomalySegmentation(OTETrainingTestInterface):
     """
     The main class of running test in this file.
""" - PERFORMANCE_RESULTS = None # it is required for e2e system + + PERFORMANCE_RESULTS = None # it is required for e2e system helper = OTETestHelper(AnomalySegmentationTrainingTestParameters()) @classmethod @@ -365,71 +369,78 @@ def get_list_of_tests(cls, usecase: Optional[str] = None): return cls.helper.get_list_of_tests(usecase) @pytest.fixture - def params_factories_for_test_actions_fx(self, current_test_parameters_fx, - dataset_definitions_fx,ote_current_reference_dir_fx, - template_paths_fx) -> Dict[str,Callable[[], Dict]]: - logger.debug('params_factories_for_test_actions_fx: begin') + def params_factories_for_test_actions_fx( + self, current_test_parameters_fx, dataset_definitions_fx, ote_current_reference_dir_fx, template_paths_fx + ) -> Dict[str, Callable[[], Dict]]: + logger.debug("params_factories_for_test_actions_fx: begin") test_parameters = deepcopy(current_test_parameters_fx) dataset_definitions = deepcopy(dataset_definitions_fx) template_paths = deepcopy(template_paths_fx) + def _training_params_factory() -> Dict: if dataset_definitions is None: pytest.skip('The parameter "--dataset-definitions" is not set') - model_name = test_parameters['model_name'] - dataset_name = test_parameters['dataset_name'] - patience = test_parameters['patience'] - batch_size = test_parameters['batch_size'] + model_name = test_parameters["model_name"] + dataset_name = test_parameters["dataset_name"] + patience = test_parameters["patience"] + batch_size = test_parameters["batch_size"] dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name) if model_name not in template_paths: - raise ValueError(f'Model {model_name} is absent in template_paths, ' - f'template_paths.keys={list(template_paths.keys())}') + raise ValueError( + f"Model {model_name} is absent in template_paths, " + f"template_paths.keys={list(template_paths.keys())}" + ) template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY]) - logger.debug('training params factory: Before creating dataset and labels_schema') + logger.debug("training params factory: Before creating dataset and labels_schema") dataset, labels_schema = _create_anomaly_dataset_and_labels_schema( - dataset_params, dataset_name, TaskType.ANOMALY_SEGMENTATION) - logger.debug('training params factory: After creating dataset and labels_schema') + dataset_params, dataset_name, TaskType.ANOMALY_SEGMENTATION + ) + logger.debug("training params factory: After creating dataset and labels_schema") return { - 'dataset': dataset, - 'labels_schema': labels_schema, - 'template_path': template_path, - 'patience': patience, - 'batch_size': batch_size, + "dataset": dataset, + "labels_schema": labels_schema, + "template_path": template_path, + "patience": patience, + "batch_size": batch_size, } - def _nncf_graph_params_factory() -> Dict[str,Callable[[], Dict]]: + def _nncf_graph_params_factory() -> Dict[str, Callable[[], Dict]]: if dataset_definitions is None: pytest.skip('The parameter "--dataset-definitions" is not set') - model_name = test_parameters['model_name'] - dataset_name = test_parameters['dataset_name'] + model_name = test_parameters["model_name"] + dataset_name = test_parameters["dataset_name"] dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name) if model_name not in template_paths: - raise ValueError(f'Model {model_name} is absent in template_paths, ' - f'template_paths.keys={list(template_paths.keys())}') + raise ValueError( + f"Model {model_name} is absent 
in template_paths, " + f"template_paths.keys={list(template_paths.keys())}" + ) template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY]) - logger.debug('training params factory: Before creating dataset and labels_schema') + logger.debug("training params factory: Before creating dataset and labels_schema") dataset, labels_schema = _create_anomaly_dataset_and_labels_schema( - dataset_params, dataset_name, TaskType.ANOMALY_SEGMENTATION) - logger.debug('training params factory: After creating dataset and labels_schema') + dataset_params, dataset_name, TaskType.ANOMALY_SEGMENTATION + ) + logger.debug("training params factory: After creating dataset and labels_schema") return { - 'dataset': dataset, - 'labels_schema': labels_schema, - 'template_path': template_path, - 'reference_dir': ote_current_reference_dir_fx, - 'fn_get_compressed_model': None #NNCF not yet implemented in Anomaly + "dataset": dataset, + "labels_schema": labels_schema, + "template_path": template_path, + "reference_dir": ote_current_reference_dir_fx, + "fn_get_compressed_model": None, # NNCF not yet implemented in Anomaly } params_factories_for_test_actions = { - 'training': _training_params_factory, - 'nncf_graph': _nncf_graph_params_factory, + "training": _training_params_factory, + "nncf_graph": _nncf_graph_params_factory, } - logger.debug('params_factories_for_test_actions_fx: end') + logger.debug("params_factories_for_test_actions_fx: end") return params_factories_for_test_actions @pytest.fixture @@ -445,37 +456,35 @@ def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_act If the main parameters used for this test differs w.r.t. the previous test, a new instance of test case class will be created. """ - test_case = type(self).helper.get_test_case(current_test_parameters_fx, - params_factories_for_test_actions_fx) + test_case = type(self).helper.get_test_case(current_test_parameters_fx, params_factories_for_test_actions_fx) return test_case @pytest.fixture def data_collector_fx(self, request) -> DataCollector: setup = deepcopy(request.node.callspec.params) - setup['environment_name'] = os.environ.get('TT_ENVIRONMENT_NAME', 'no-env') - setup['test_type'] = os.environ.get('TT_TEST_TYPE', 'no-test-type') # TODO: get from e2e test type - setup['scenario'] = 'api' # TODO(lbeynens): get from a fixture! - setup['test'] = request.node.name - setup['subject'] = 'custom-anomaly-segmentation' - setup['project'] = 'ote' - if 'test_parameters' in setup: - assert isinstance(setup['test_parameters'], dict) - if 'dataset_name' not in setup: - setup['dataset_name'] = setup['test_parameters'].get('dataset_name') - if 'model_name' not in setup: - setup['model_name'] = setup['test_parameters'].get('model_name') - if 'test_stage' not in setup: - setup['test_stage'] = setup['test_parameters'].get('test_stage') - if 'usecase' not in setup: - setup['usecase'] = setup['test_parameters'].get('usecase') - logger.info(f'creating DataCollector: setup=\n{pformat(setup, width=140)}') - data_collector = DataCollector(name='TestOTEIntegration', - setup=setup) + setup["environment_name"] = os.environ.get("TT_ENVIRONMENT_NAME", "no-env") + setup["test_type"] = os.environ.get("TT_TEST_TYPE", "no-test-type") # TODO: get from e2e test type + setup["scenario"] = "api" # TODO(lbeynens): get from a fixture! 
+        setup["test"] = request.node.name
+        setup["subject"] = "custom-anomaly-segmentation"
+        setup["project"] = "ote"
+        if "test_parameters" in setup:
+            assert isinstance(setup["test_parameters"], dict)
+            if "dataset_name" not in setup:
+                setup["dataset_name"] = setup["test_parameters"].get("dataset_name")
+            if "model_name" not in setup:
+                setup["model_name"] = setup["test_parameters"].get("model_name")
+            if "test_stage" not in setup:
+                setup["test_stage"] = setup["test_parameters"].get("test_stage")
+            if "usecase" not in setup:
+                setup["usecase"] = setup["test_parameters"].get("usecase")
+        logger.info(f"creating DataCollector: setup=\n{pformat(setup, width=140)}")
+        data_collector = DataCollector(name="TestOTEIntegration", setup=setup)
         with data_collector:
-            logger.info('data_collector is created')
+            logger.info("data_collector is created")
             yield data_collector
-        logger.info('data_collector is released')
+        logger.info("data_collector is released")

     @e2e_pytest_performance
     def test(self, test_parameters, test_case_fx, data_collector_fx, cur_test_expected_metrics_callback_fx):
-        test_case_fx.run_stage(test_parameters['test_stage'], data_collector_fx, cur_test_expected_metrics_callback_fx)
+        test_case_fx.run_stage(test_parameters["test_stage"], data_collector_fx, cur_test_expected_metrics_callback_fx)
diff --git a/external/anomaly/tests/test_ote_task.py b/external/anomaly/tests/test_ote_task.py
index 769e40146c7..e2e67eac5d6 100644
--- a/external/anomaly/tests/test_ote_task.py
+++ b/external/anomaly/tests/test_ote_task.py
@@ -23,6 +23,7 @@
 import pytest
 from ote_anomalib.configs import get_anomalib_config
 from tools.sample import OteAnomalyTask
+
 from tests.helpers.config import get_config_and_task_name
 from tests.helpers.dummy_dataset import TestDataset

@@ -56,11 +57,11 @@ def test_ote_config(task_path, template_path):
     ote_config, task_name = get_config_and_task_name(f"{task_path}/configs/{template_path}/template.yaml")

     # change parameter value in OTE config
-    ote_config.dataset.train_batch_size = train_batch_size
+    ote_config.learning_parameters.train_batch_size = train_batch_size
     # convert OTE -> Anomalib
     anomalib_config = get_anomalib_config(task_name, ote_config)
     # check if default parameter was overwritten
     assert anomalib_config.dataset.train_batch_size == train_batch_size

 @TestDataset(num_train=200, num_test=10, dataset_path="./datasets/MVTec", use_mvtec=False)
 def test_ote_train_export_and_optimize(
diff --git a/ote_cli/ote_cli/utils/hpo.py b/ote_cli/ote_cli/utils/hpo.py
index 1424d15f473..5e0aa7a0118 100644
--- a/ote_cli/ote_cli/utils/hpo.py
+++ b/ote_cli/ote_cli/utils/hpo.py
@@ -504,15 +504,7 @@ def __init__(
         # make batch size range lower than train set size
         env_hp = self.environment.get_hyper_parameters()
-        batch_size_name = None
-        if (
-            _is_cls_framework_task(task_type)
-            or _is_det_framework_task(task_type)
-            or _is_seg_framework_task(task_type)
-        ):
-            batch_size_name = "learning_parameters.batch_size"
-        elif _is_anomaly_framework_task(task_type):
-            batch_size_name = "dataset.train_batch_size"
+        batch_size_name = "learning_parameters.batch_size"
         if batch_size_name is not None:
             if batch_size_name in hpopt_cfg["hp_space"]:
                 batch_range = hpopt_cfg["hp_space"][batch_size_name]["range"]
@@ -814,8 +806,8 @@ def get_num_full_iterations(environment):
         learning_parameters = params.learning_parameters
         num_full_iterations = learning_parameters.num_iters
     elif _is_anomaly_framework_task(task_type):
-        trainer = 
params.trainer - num_full_iterations = trainer.max_epochs + learning_parameters = params.learning_parameters + num_full_iterations = learning_parameters.max_epochs return num_full_iterations
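
Reviewer note (not part of the diff): a minimal sketch of how the regrouped hyper-parameters are exercised after this change, modelled on the updated test_ote_task.py and the new _anomalib_config_mapper above. The template path and the override values below are illustrative placeholders, not taken from the patch.

    from ote_anomalib.configs import get_anomalib_config
    from tests.helpers.config import get_config_and_task_name

    # Hypothetical template path; any Padim/STFPM template touched by this patch works.
    ote_config, task_name = get_config_and_task_name(
        "external/anomaly/configs/anomaly_classification/stfpm/template.yaml"
    )

    # Tunable values now sit in a single `learning_parameters` group instead of
    # the old `dataset` / `model` / `trainer` split.
    ote_config.learning_parameters.train_batch_size = 16  # was dataset.train_batch_size
    ote_config.learning_parameters.lr = 0.1               # was model.lr (STFPM only)
    ote_config.learning_parameters.max_epochs = 50        # was trainer.max_epochs (STFPM only)

    # _anomalib_config_mapper routes the group back to anomalib's native layout:
    # train_batch_size -> dataset, max_epochs -> trainer, everything else -> model.
    anomalib_config = get_anomalib_config(task_name, ote_config)
    assert anomalib_config.dataset.train_batch_size == 16
    assert anomalib_config.trainer.max_epochs == 50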