I want to add a custom backbone, but I get the following error:

KeyError: 'HGNetV2 is not in the mmdet::model registry. Please check whether the value of `HGNetV2` is correct or it was registered as expected. More details can be found at https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#import-the-custom-module'

### hgnetv2.py code is as follows:
`"""Copyright(c) 2023 lyuwenyu. All Rights Reserved.
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
"""
import torch
import torch.nn as nn
import torch.nn.init as init
from mmdet.registry import MODELS
from torch import Tensor
from typing import List, Tuple, Any
class FrozenBatchNorm2d(nn.Module):
    """Copied and modified from
    https://github.com/facebookresearch/detr/blob/master/models/backbone.py

    BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with an added eps before rsqrt,
    without which any model other than torchvision.models.resnet[18,34,50,101]
    produces NaNs.
    """

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        n = num_features
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))
        self.eps = eps
        self.num_features = n

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Move the reshapes to the beginning to make the op fuser-friendly.
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (rv + self.eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias

    def extra_repr(self):
        return "{num_features}, eps={eps}".format(**self.__dict__)
class LearnableAffineBlock(nn.Module):

    def __init__(self, scale_value=1.0, bias_value=0.0):
        super().__init__()
        self.scale = nn.Parameter(torch.tensor([scale_value]))
        self.bias = nn.Parameter(torch.tensor([bias_value]))
class ConvBNAct(nn.Module):

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 padding=0,
                 groups=1,
                 use_act=True,
                 use_lab=False):
        super().__init__()
        self.use_act = use_act
        self.use_lab = use_lab
        if padding == 'same':
            self.conv = nn.Sequential(
                nn.ZeroPad2d([0, 1, 0, 1]),
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride,
                    groups=groups,
                    bias=False))
        else:
            self.conv = nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding=(kernel_size - 1) // 2,
                groups=groups,
                bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        if self.use_act:
            self.act = nn.ReLU()
        if self.use_lab:
            self.lab = LearnableAffineBlock()
class LightConvBNAct(nn.Module):

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 groups=1,
                 use_lab=False):
        super().__init__()
        self.conv1 = ConvBNAct(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            use_act=False,
            use_lab=use_lab)
        self.conv2 = ConvBNAct(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            groups=out_channels,
            use_act=True,
            use_lab=use_lab)
class StemBlock(nn.Module):

    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 use_lab=False):
        super().__init__()
        self.stem1 = ConvBNAct(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=3,
            stride=2,
            use_lab=use_lab)
        self.stem2a = ConvBNAct(
            in_channels=mid_channels,
            out_channels=mid_channels // 2,
            kernel_size=2,
            stride=1,
            padding='same',
            use_lab=use_lab)
        self.stem2b = ConvBNAct(
            in_channels=mid_channels // 2,
            out_channels=mid_channels,
            kernel_size=2,
            stride=1,
            padding='same',
            use_lab=use_lab)
        self.stem3 = ConvBNAct(
            in_channels=mid_channels * 2,
            out_channels=mid_channels,
            kernel_size=3,
            stride=2,
            use_lab=use_lab)
        self.stem4 = ConvBNAct(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            use_lab=use_lab)
class HG_Block(nn.Module):

    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 kernel_size=3,
                 layer_num=6,
                 identity=False,
                 light_block=True,
                 use_lab=False):
        super().__init__()
        self.identity = identity
class HG_Stage(nn.Module):

    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 block_num,
                 layer_num=6,
                 downsample=True,
                 light_block=True,
                 kernel_size=3,
                 use_lab=False):
        super().__init__()
        self.downsample = downsample
        if downsample:
            self.downsample = ConvBNAct(
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_size=3,
                stride=2,
                groups=in_channels,
                use_act=False,
                use_lab=use_lab)

        blocks_list = []
        for i in range(block_num):
            blocks_list.append(
                HG_Block(
                    in_channels=in_channels if i == 0 else out_channels,
                    mid_channels=mid_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    layer_num=layer_num,
                    identity=False if i == 0 else True,
                    light_block=light_block,
                    use_lab=use_lab))
        self.blocks = nn.Sequential(*blocks_list)

    def forward(self, x):
        if self.downsample:
            x = self.downsample(x)
        x = self.blocks(x)
        return x
@MODELS.register_module()
class HGNetv2(nn.Module):
    """HGNetv2 (PP-HGNetV2) backbone.

    Args:
        name: str. Architecture variant, one of 'L', 'X' or 'H'.
        use_lab: bool. Whether to use LearnableAffineBlock in the network.
        return_idx: list. Indices of the stages whose outputs are returned.
        freeze_at: int. Freeze the stem and the first `freeze_at` stages.
        freeze_norm: bool. Whether to replace BatchNorm2d with FrozenBatchNorm2d.
        norm_eval: bool. Whether to keep BatchNorm2d layers in eval mode during training.
        pretrained: bool or str. Load pretrained weights from the URL in
            `arch_configs` (True) or from a local checkpoint path (str).
    Returns:
        model: nn.Module.
    """

    arch_configs = {
        'L': {
            'stem_channels': [3, 32, 48],
            'stage_config': {
                # in_channels, mid_channels, out_channels, num_blocks,
                # downsample, light_block, kernel_size, layer_num
                "stage1": [48, 48, 128, 1, False, False, 3, 6],
                "stage2": [128, 96, 512, 1, True, False, 3, 6],
                "stage3": [512, 192, 1024, 3, True, True, 5, 6],
                "stage4": [1024, 384, 2048, 1, True, True, 5, 6],
            },
            'url': 'https://github.com/lyuwenyu/storage/releases/download/v0.1/PPHGNetV2_L_ssld_pretrained_from_paddle.pth',
        },
        'X': {
            'stem_channels': [3, 32, 64],
            'stage_config': {
                # in_channels, mid_channels, out_channels, num_blocks,
                # downsample, light_block, kernel_size, layer_num
                "stage1": [64, 64, 128, 1, False, False, 3, 6],
                "stage2": [128, 128, 512, 2, True, False, 3, 6],
                "stage3": [512, 256, 1024, 5, True, True, 5, 6],
                "stage4": [1024, 512, 2048, 2, True, True, 5, 6],
            },
            'url': 'https://github.com/lyuwenyu/storage/releases/download/v0.1/PPHGNetV2_X_ssld_pretrained_from_paddle.pth',
        },
        'H': {
            'stem_channels': [3, 48, 96],
            'stage_config': {
                # in_channels, mid_channels, out_channels, num_blocks,
                # downsample, light_block, kernel_size, layer_num
                "stage1": [96, 96, 192, 2, False, False, 3, 6],
                "stage2": [192, 192, 512, 3, True, False, 3, 6],
                "stage3": [512, 384, 1024, 6, True, True, 5, 6],
                "stage4": [1024, 768, 2048, 3, True, True, 5, 6],
            },
            'url': 'https://github.com/lyuwenyu/storage/releases/download/v0.1/PPHGNetV2_H_ssld_pretrained_from_paddle.pth',
        },
    }

    def __init__(self,
                 name,
                 use_lab=False,
                 return_idx=[1, 2, 3],
                 freeze_at=-1,
                 freeze_norm=False,
                 norm_eval=False,
                 pretrained=False):
        super().__init__()
        self.use_lab = use_lab
        self.return_idx = return_idx
        self.freeze_at = freeze_at
        self.freeze_norm = freeze_norm
        self.norm_eval = norm_eval

        stem_channels = self.arch_configs[name]['stem_channels']
        stage_config = self.arch_configs[name]['stage_config']
        download_url = self.arch_configs[name]['url']

        self._out_strides = [4, 8, 16, 32]
        self._out_channels = [stage_config[k][2] for k in stage_config]

        # stem
        self.stem = StemBlock(
            in_channels=stem_channels[0],
            mid_channels=stem_channels[1],
            out_channels=stem_channels[2],
            use_lab=use_lab)

        # stages
        self.stages = nn.ModuleList()
        for i, k in enumerate(stage_config):
            (in_channels, mid_channels, out_channels, block_num, downsample,
             light_block, kernel_size, layer_num) = stage_config[k]
            self.stages.append(
                HG_Stage(
                    in_channels,
                    mid_channels,
                    out_channels,
                    block_num,
                    layer_num,
                    downsample,
                    light_block,
                    kernel_size,
                    use_lab))

        self._init_weights()
        self._freeze_stages()

        if pretrained:
            if isinstance(pretrained, bool) or 'http' in pretrained:
                state = torch.hub.load_state_dict_from_url(
                    download_url, map_location='cpu')
            else:
                state = torch.load(pretrained, map_location='cpu')
            self.load_state_dict(state)
            print(f'Load HGNetv2_{name} state_dict')

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.constant_(m.bias, 0)

    def _freeze_parameters(self, m: nn.Module):
        for p in m.parameters():
            p.requires_grad = False

    def _freeze_norm(self, m: nn.Module):
        if isinstance(m, nn.BatchNorm2d):
            m = FrozenBatchNorm2d(m.num_features)
        else:
            for name, child in m.named_children():
                _child = self._freeze_norm(child)
                if _child is not child:
                    setattr(m, name, _child)
        return m

    def _freeze_stages(self):
        if self.freeze_at >= 0:
            self._freeze_parameters(self.stem)
            for i in range(min(self.freeze_at, 4)):
                self._freeze_parameters(self.stages[i])
        if self.freeze_norm:
            self._freeze_norm(self)

    def forward(self, x: Tensor) -> Tuple[Any, ...]:
        x = self.stem(x)
        outs = []
        for idx, stage in enumerate(self.stages):
            x = stage(x)
            if idx in self.return_idx:
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
```
### __init__.py code is as follows:
```python
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .cspnext import CSPNeXt
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .efficientnet import EfficientNet
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
from .hgnetv2 import HGNetv2
__all__ = [
    'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
    'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
    'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
    'SwinTransformer', 'PyramidVisionTransformer',
    'PyramidVisionTransformerV2', 'EfficientNet', 'CSPNeXt', 'HGNetv2'
]
```
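For reference, the docs page linked in the error describes an alternative to editing mmdet's own `__init__.py`: importing the custom module from the config via `custom_imports`. A minimal sketch, assuming the file were instead kept at a hypothetical `projects/hgnet/hgnetv2.py` on the Python path:

```python
# Hypothetical alternative: keep hgnetv2.py outside mmdet and let the config
# import it, which runs @MODELS.register_module() at config-parsing time.
custom_imports = dict(
    imports=['projects.hgnet.hgnetv2'],  # hypothetical module path
    allow_failed_imports=False)
```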
### Config file code is as follows:
```python
_base_ = ['./mask-rcnn_r50_fpn_1x_coco.py']

model = dict(
    type='MaskRCNN',
    backbone=dict(
        type='HGNetV2',
        name='X',
        use_lab=False,
        return_idx=[1, 2, 3],
        freeze_at=0,
        freeze_norm=True,
        pretrained=True)
)
```
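As I understand it, the string in `type=` is looked up by name in the `MODELS` registry, so it has to match whatever name was registered. A minimal sketch for inspecting what is actually registered (assuming mmengine's `Registry.get()` returns `None` for names it cannot find):

```python
# Diagnostic sketch: importing mmdet.models runs the backbones/__init__.py
# shown above, which is what actually registers the class with MODELS.
import mmdet.models  # noqa: F401
from mmdet.registry import MODELS

print(MODELS.get('HGNetv2'))  # name used by @MODELS.register_module() above
print(MODELS.get('HGNetV2'))  # spelling used in the config's type= field
```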
I hope someone can tell me where the problem is.