diff --git a/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py b/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py
index 75273502f4..e8539d2410 100644
--- a/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py
+++ b/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py
@@ -105,7 +105,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 180
 checkpoint_config = dict(interval=5)
 evaluation = dict(
diff --git a/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py b/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py
index f364860bb9..a7acbc484c 100644
--- a/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py
+++ b/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py
@@ -105,7 +105,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 180
 checkpoint_config = dict(interval=5)
 evaluation = dict(
diff --git a/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py b/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py
index 33e812c2ee..402bb45a18 100644
--- a/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py
+++ b/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py
@@ -116,7 +116,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 180
 checkpoint_config = dict(interval=5)
 evaluation = dict(
diff --git a/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py b/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py
index 3cb74e0701..67e52010a0 100644
--- a/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py
+++ b/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py
@@ -111,7 +111,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 256
 checkpoint_config = dict(interval=4)
 workflow = [('train', 1)]
diff --git a/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py b/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py
index 18a9ab08bf..63b0a5250c 100644
--- a/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py
+++ b/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py
@@ -111,7 +111,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 256
 checkpoint_config = dict(interval=4)
 workflow = [('train', 1)]
diff --git a/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py b/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py
index 708ce05e52..283bbbdaf4 100644
--- a/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py
+++ b/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py
@@ -121,7 +121,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 256
 checkpoint_config = dict(interval=4)
 workflow = [('train', 1)]
diff --git a/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py b/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py
index 86ee95c165..c7a1e9bdd6 100644
--- a/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py
+++ b/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py
@@ -102,7 +102,7 @@
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
 lr_config = dict(
-    policy='CosineAnealing',
+    policy='CosineAnnealing',
     min_lr=0,
     warmup='linear',
     warmup_by_epoch=True,
diff --git a/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py b/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py
index 37fd24fc79..6980bcebf5 100644
--- a/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py
+++ b/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py
@@ -94,7 +94,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 256
 checkpoint_config = dict(interval=4)
 workflow = [('train', 1)]
diff --git a/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py b/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py
index 93199932b1..2c1424b576 100644
--- a/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py
+++ b/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py
@@ -102,7 +102,7 @@
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
 lr_config = dict(
-    policy='CosineAnealing',
+    policy='CosineAnnealing',
     min_lr=0,
     warmup='linear',
     warmup_by_epoch=True,
diff --git a/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py b/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py
index 284c34fd74..2477bb22a9 100644
--- a/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py
+++ b/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py
@@ -94,7 +94,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 256
 checkpoint_config = dict(interval=4)
 workflow = [('train', 1)]
diff --git a/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py b/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py
index 1127017c09..fbe7f44fc8 100644
--- a/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py
+++ b/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py
@@ -104,7 +104,7 @@
     weight_decay=0.0001)  # this lr is used for 8 gpus
 optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
 # learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
 total_epochs = 256
 checkpoint_config = dict(interval=4)
 workflow = [('train', 1)]
diff --git a/docs/config.md b/docs/config.md
index 7be1ed374c..4d79362ded 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -158,7 +158,7 @@
 optimizer_config = dict(  # Config used to build the optimizer hook
     grad_clip=None)  # Most of the methods do not use gradient clip
 # learning policy
 lr_config = dict(  # Learning rate scheduler config used to register LrUpdater hook
-    policy='step',  # Policy of scheduler, also support CosineAnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
+    policy='step',  # Policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
     step=7)  # Steps to decay the learning rate
 total_epochs = 9  # Total epochs to train the model
@@ -364,7 +364,7 @@
 optimizer_config = dict(  # Config used to build the optimizer hook
     grad_clip=dict(max_norm=40, norm_type=2))  # Use gradient clip
 # learning policy
 lr_config = dict(  # Learning rate scheduler config used to register LrUpdater hook
-    policy='step',  # Policy of scheduler, also support CosineAnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
+    policy='step',  # Policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
     step=[40, 80])  # Steps to decay the learning rate
 total_epochs = 100  # Total epochs to train the model
 checkpoint_config = dict(  # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation
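Note on why the rename matters: mmcv derives the LR hook class name from the `policy` string (roughly `policy + 'LrUpdaterHook'`, looked up in its hook registry), so the misspelled `'CosineAnealing'` cannot resolve to `CosineAnnealingLrUpdaterHook`; it fails at hook construction rather than silently using a wrong schedule. For reference, below is a minimal sketch of the schedule the corrected `policy='CosineAnnealing'` with `min_lr=0` applies. This mirrors mmcv's annealing formula but is not its implementation, and the `0.1` base LR is an assumed example value, not one taken from these configs.

```python
import math


def cosine_annealing_lr(base_lr, progress, max_progress, min_lr=0.0):
    """LR follows a half cosine wave from base_lr down to min_lr.

    `progress`/`max_progress` are in epochs here; mmcv's hook can also
    anneal by iteration. Sketch only, not mmcv's actual code.
    """
    factor = progress / max_progress  # fraction of training completed
    return min_lr + 0.5 * (base_lr - min_lr) * (1 + math.cos(math.pi * factor))


# Assumed example: base LR 0.1 over the 256-epoch SlowFast/SlowOnly schedule.
for epoch in (0, 64, 128, 192, 256):
    print(f'epoch {epoch:3d}: lr = {cosine_annealing_lr(0.1, epoch, 256):.5f}')
```

The two `*_flow.py` configs additionally set `warmup='linear'` with `warmup_by_epoch=True`, so mmcv ramps the LR linearly for the first warmup epochs before handing over to the cosine curve above.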