Add Self-SL configs for SegNext (#2215)
* add fit selfsl configs to segnext

* Move selfsl recipes into src

* Remove unused parameters

* Update batch & lr

* Remove skip for segnext

* Update CHANGELOG

* Update licenses

* Update docstring
sungchul2 authored Sep 19, 2023
1 parent 7eec927 commit bd8d482
Showing 16 changed files with 237 additions and 14 deletions.
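For context: each SegNext variant (B, S, T) gains a selfsl recipe consisting of an __init__.py, a data pipeline, hyperparameter overrides, and a model config that wraps the MSCAN backbone in a DetConB self-supervised learner. The short sketch below is not part of the commit; it only illustrates, assuming mmcv 1.x is installed and using a hypothetical file path, how such a model config is typically resolved by following its _base_ chain.

# Hedged illustration, not from this commit. The config path below is hypothetical.
from mmcv import Config  # mmcv 1.x

cfg = Config.fromfile("segnext_b/selfsl/model.py")  # hypothetical relative path
print(cfg.model.type)       # "DetConB"  (set in the model.py files added here)
print(cfg.model.head.type)  # "DetConHead"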
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -16,6 +16,7 @@ All notable changes to this project will be documented in this file.
- Add Semi-SL Mean Teacher algorithm for Instance Segmentation task(<https://github.com/openvinotoolkit/training_extensions/pull/2444>)
- Official supports for YOLOX-X, YOLOX-L, YOLOX-S, ResNeXt101-ATSS (<https://github.com/openvinotoolkit/training_extensions/pull/2485>)
- Add new argument to track resource usage in train command(<https://github.com/openvinotoolkit/training_extensions/pull/2500>)
- Add Self-SL for semantic segmentation of SegNext families (<https://github.com/openvinotoolkit/training_extensions/pull/2215>)

### Enhancements

@@ -0,0 +1,4 @@
"""Initialization of SegNext-B model for Self-SL Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
@@ -0,0 +1,7 @@
"""Data Pipeline of SegNext model for Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# pylint: disable=invalid-name
_base_ = ["../../base/data/selfsl/data_pipeline.py"]
@@ -0,0 +1,17 @@
# Hyperparameters.
hyper_parameters:
  parameter_overrides:
    learning_parameters:
      batch_size:
        default_value: 32
      learning_rate:
        default_value: 0.0001
      learning_rate_warmup_iters:
        default_value: 0
      num_iters:
        default_value: 10
      enable_early_stopping:
        default_value: false
    algo_backend:
      train_type:
        default_value: Selfsupervised
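These overrides replace the supervised defaults for Self-SL pretraining: batch size 32, learning rate 0.0001, no warmup, num_iters 10, early stopping disabled, and the Selfsupervised train type. A minimal way to inspect them, assuming the block is saved as a YAML file (the file name is not shown in this diff) and PyYAML is available:

# Hedged sketch, not from this commit: read the overrides above from a file
# assumed to be named hparams.yaml (hypothetical name).
import yaml

with open("hparams.yaml") as f:
    overrides = yaml.safe_load(f)["hyper_parameters"]["parameter_overrides"]

print(overrides["learning_parameters"]["batch_size"]["default_value"])  # 32
print(overrides["algo_backend"]["train_type"]["default_value"])         # Selfsupervised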
@@ -0,0 +1,53 @@
"""Model configuration of SegNext-B model for Self-SL Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# pylint: disable=invalid-name

_base_ = [
    "../../../../../recipes/stages/segmentation/selfsl.py",
    "../../../../common/adapters/mmcv/configs/backbones/segnext.py",
]


model = dict(
    type="DetConB",
    pretrained="https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segnext/mscan_b_20230227-3ab7d230.pth",
    num_classes=256,
    num_samples=16,
    downsample=8,
    input_transform="resize_concat",
    in_index=[1, 2, 3],
    backbone=dict(
        embed_dims=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        drop_path_rate=0.1,
        norm_cfg=dict(type="BN", requires_grad=True),
    ),
    neck=dict(
        type="SelfSLMLP",
        in_channels=960,
        hid_channels=1920,
        out_channels=256,
        norm_cfg=dict(type="BN1d", requires_grad=True),
        with_avg_pool=False,
    ),
    head=dict(
        type="DetConHead",
        predictor=dict(
            type="SelfSLMLP",
            in_channels=256,
            hid_channels=1920,
            out_channels=256,
            norm_cfg=dict(type="BN1d", requires_grad=True),
            with_avg_pool=False,
        ),
        loss_cfg=dict(type="DetConLoss", temperature=0.1),
    ),
)

optimizer = dict(paramwise_cfg=dict(custom_keys={"pos_block": dict(decay_mult=0.0), "norm": dict(decay_mult=0.0)}))
load_from = None
resume_from = None
fp16 = None
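A note on the neck width above: with input_transform="resize_concat" and in_index=[1, 2, 3], the SelfSLMLP neck consumes the channel-wise concatenation of backbone stages 1 to 3, so in_channels follows directly from the embed_dims declared in the backbone override. A quick check in plain Python, not part of the commit:

# Stage widths from the backbone override above (MSCAN-B).
embed_dims = [64, 128, 320, 512]
in_index = [1, 2, 3]  # stages concatenated by input_transform="resize_concat"
assert sum(embed_dims[i] for i in in_index) == 960  # matches neck.in_channels
# hid_channels=1920 is simply twice the concatenated width.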
@@ -0,0 +1,4 @@
"""Initialization of SegNext-S model for Self-SL Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
@@ -0,0 +1,7 @@
"""Data Pipeline of SegNext model for Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# pylint: disable=invalid-name
_base_ = ["../../base/data/selfsl/data_pipeline.py"]
@@ -0,0 +1,17 @@
# Hyperparameters.
hyper_parameters:
  parameter_overrides:
    learning_parameters:
      batch_size:
        default_value: 32
      learning_rate:
        default_value: 0.0001
      learning_rate_warmup_iters:
        default_value: 0
      num_iters:
        default_value: 10
      enable_early_stopping:
        default_value: false
    algo_backend:
      train_type:
        default_value: Selfsupervised
@@ -0,0 +1,52 @@
"""Model configuration of SegNext-S model for Self-SL Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# pylint: disable=invalid-name

_base_ = [
    "../../../../../recipes/stages/segmentation/selfsl.py",
    "../../../../common/adapters/mmcv/configs/backbones/segnext.py",
]


model = dict(
    type="DetConB",
    pretrained="https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segnext/mscan_s_20230227-f33ccdf2.pth",
    num_classes=256,
    num_samples=16,
    downsample=8,
    input_transform="resize_concat",
    in_index=[1, 2, 3],
    backbone=dict(
        embed_dims=[64, 128, 320, 512],
        depths=[2, 2, 4, 2],
        norm_cfg=dict(type="BN", requires_grad=True),
    ),
    neck=dict(
        type="SelfSLMLP",
        in_channels=960,
        hid_channels=1920,
        out_channels=256,
        norm_cfg=dict(type="BN1d", requires_grad=True),
        with_avg_pool=False,
    ),
    head=dict(
        type="DetConHead",
        predictor=dict(
            type="SelfSLMLP",
            in_channels=256,
            hid_channels=1920,
            out_channels=256,
            norm_cfg=dict(type="BN1d", requires_grad=True),
            with_avg_pool=False,
        ),
        loss_cfg=dict(type="DetConLoss", temperature=0.1),
    ),
)

optimizer = dict(paramwise_cfg=dict(custom_keys={"pos_block": dict(decay_mult=0.0), "norm": dict(decay_mult=0.0)}))
load_from = None
resume_from = None
fp16 = None
@@ -0,0 +1,4 @@
"""Initialization of SegNext-T model for Self-SL Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
@@ -0,0 +1,7 @@
"""Data Pipeline of SegNext model for Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# pylint: disable=invalid-name
_base_ = ["../../base/data/selfsl/data_pipeline.py"]
@@ -0,0 +1,17 @@
# Hyperparameters.
hyper_parameters:
  parameter_overrides:
    learning_parameters:
      batch_size:
        default_value: 32
      learning_rate:
        default_value: 0.0001
      learning_rate_warmup_iters:
        default_value: 0
      num_iters:
        default_value: 10
      enable_early_stopping:
        default_value: false
    algo_backend:
      train_type:
        default_value: Selfsupervised
@@ -0,0 +1,47 @@
"""Model configuration of SegNext-T model for Self-SL Segmentation Task."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# pylint: disable=invalid-name

_base_ = [
    "../../../../../recipes/stages/segmentation/selfsl.py",
    "../../../../common/adapters/mmcv/configs/backbones/segnext.py",
]


model = dict(
    type="DetConB",
    pretrained="https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segnext/mscan_t_20230227-119e8c9f.pth",
    num_classes=256,
    num_samples=16,
    downsample=8,
    input_transform="resize_concat",
    in_index=[1, 2, 3],
    neck=dict(
        type="SelfSLMLP",
        in_channels=480,
        hid_channels=960,
        out_channels=256,
        norm_cfg=dict(type="BN1d", requires_grad=True),
        with_avg_pool=False,
    ),
    head=dict(
        type="DetConHead",
        predictor=dict(
            type="SelfSLMLP",
            in_channels=256,
            hid_channels=960,
            out_channels=256,
            norm_cfg=dict(type="BN1d", requires_grad=True),
            with_avg_pool=False,
        ),
        loss_cfg=dict(type="DetConLoss", temperature=0.1),
    ),
)

optimizer = dict(paramwise_cfg=dict(custom_keys={"pos_block": dict(decay_mult=0.0), "norm": dict(decay_mult=0.0)}))
load_from = None
resume_from = None
fp16 = None
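Unlike the B and S configs, the T config overrides no backbone fields, so its stage widths come from the shared segnext backbone base config. Assuming the usual MSCAN-T widths of [32, 64, 160, 256] (an assumption; they are not shown in this diff), the same stage selection explains the narrower neck:

# Assumption, not shown in this diff: the inherited backbone config defines
# MSCAN-T with embed_dims = [32, 64, 160, 256].
embed_dims_t = [32, 64, 160, 256]
assert sum(embed_dims_t[i] for i in [1, 2, 3]) == 480  # matches neck.in_channels for SegNext-T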
6 changes: 0 additions & 6 deletions tests/e2e/cli/semantic_segmentation/test_segmentation.py
@@ -318,8 +318,6 @@ class TestToolsOTXSelfSLSegmentation:
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_train(self, template, tmp_dir_path):
        tmp_dir_path_1 = tmp_dir_path / "segmentation/test_selfsl"
        if not (Path(template.model_template_path).parent / "selfsl").is_dir():
            pytest.skip("Self-SL training type isn't available for this template")
        otx_train_testing(template, tmp_dir_path_1, otx_dir, args_selfsl)
        template_work_dir = get_template_dir(template, tmp_dir_path_1)
        assert (Path(template_work_dir) / "selfsl").is_dir()
@@ -332,8 +330,6 @@ def test_otx_train(self, template, tmp_dir_path):
    @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS")
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_eval(self, template, tmp_dir_path):
        if not (Path(template.model_template_path).parent / "selfsl").is_dir():
            pytest.skip("Self-SL training type isn't available for this template")
        tmp_dir_path = tmp_dir_path / "segmentation/test_selfsl_sl"
        otx_eval_testing(template, tmp_dir_path, otx_dir, args)

@@ -342,8 +338,6 @@ def test_otx_eval(self, template, tmp_dir_path):
    @pytest.mark.skipif(MULTI_GPU_UNAVAILABLE, reason="The number of gpu is insufficient")
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_multi_gpu_train_selfsl(self, template, tmp_dir_path):
        if not (Path(template.model_template_path).parent / "selfsl").is_dir():
            pytest.skip("Self-SL training type isn't available for this template")
        tmp_dir_path = tmp_dir_path / "segmentation/test_multi_gpu_selfsl"
        args_selfsl_multigpu = copy.deepcopy(args_selfsl)
        args_selfsl_multigpu["--gpus"] = "0,1"
@@ -203,17 +203,13 @@ def test_otx_multi_gpu_train_semisl(self, template, tmp_dir_path):
    @e2e_pytest_component
    @pytest.mark.parametrize("template", default_templates, ids=default_templates_ids)
    def test_otx_train_selfsl(self, template, tmp_dir_path):
        if not (Path(template.model_template_path).parent / "selfsl").is_dir():
            pytest.skip("Self-SL training type isn't available for this template")
        tmp_dir_path = tmp_dir_path / "segmentation/test_selfsl"
        otx_train_testing(template, tmp_dir_path, otx_dir, args_selfsl)

    @e2e_pytest_component
    @pytest.mark.skipif(MULTI_GPU_UNAVAILABLE, reason="The number of gpu is insufficient")
    @pytest.mark.parametrize("template", default_templates, ids=default_templates_ids)
    def test_otx_multi_gpu_train_selfsl(self, template, tmp_dir_path):
        if not (Path(template.model_template_path).parent / "selfsl").is_dir():
            pytest.skip("Self-SL training type isn't available for this template")
        tmp_dir_path = tmp_dir_path / "segmentation/test_multi_gpu_selfsl"
        args_selfsl_multigpu = copy.deepcopy(args_selfsl)
        args_selfsl_multigpu["--gpus"] = "0,1"
4 changes: 0 additions & 4 deletions tests/regression/semantic_segmentation/test_segmentation.py
@@ -244,8 +244,6 @@ def test_otx_train_semisl_kpi_test(self, reg_cfg, template):
    @e2e_pytest_component
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_train_selfsl(self, reg_cfg, template, tmp_dir_path):
        if not (Path(template.model_template_path).parent / "selfsl").is_dir():
            pytest.skip("Self-SL training type isn't available for this template")
        train_type = "self_supervised"
        self.performance[template.name] = {}

Expand Down Expand Up @@ -297,8 +295,6 @@ def test_otx_train_selfsl(self, reg_cfg, template, tmp_dir_path):
    @e2e_pytest_component
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_train_selfsl_kpi_test(self, reg_cfg, template):
        if not (Path(template.model_template_path).parent / "selfsl").is_dir():
            pytest.skip("Self-SL training type isn't available for this template")
        train_type = "self_supervised"
        config_selfsl = reg_cfg.load_config(train_type=train_type)
        performance = reg_cfg.get_template_performance(template, train_type=train_type)
