-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathCIAM.py
101 lines (90 loc) · 5.53 KB
/
CIAM.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import torch
from torch import nn
import torch.nn.functional as F
# Alias for the normalization layer so it can be swapped in one place
# (presumably to allow e.g. SyncBatchNorm in distributed training — TODO confirm).
BatchNorm2d = nn.BatchNorm2d
# Momentum for the running-statistics update of every BatchNorm in this file.
bn_mom = 0.1
class CIA(nn.Module):
    """Context Information Aggregation module (a DAPPM-style pooling pyramid).

    The input is processed by four parallel branches at decreasing spatial
    resolution (no pooling, 3x3 avg-pool, strided 5x5 avg-pool, global
    adaptive avg-pool), each projected to ``branch_planes`` channels.
    Branches are fused hierarchically: each pooled branch is upsampled back
    to the input resolution, added to the previous branch's output, and
    refined by a 3x3 conv. The concatenation of all four branches is
    compressed to ``outplanes`` channels and added to a 1x1 shortcut
    projection of the input.

    Args:
        inplanes: number of input channels.
        branch_planes: number of channels in each pyramid branch.
        outplanes: number of output channels.
    """

    def __init__(self, inplanes, branch_planes, outplanes):
        super(CIA, self).__init__()
        # Branch 1: light local context (3x3 average pool, stride 1, same size).
        self.scale1 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BatchNorm2d(inplanes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
        )
        # Branch 2: wider context (5x5 average pool, stride 2, half size).
        self.scale2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=5, stride=2, padding=2),
            BatchNorm2d(inplanes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
        )
        # Branch 3: global context (adaptive average pool down to 1x1).
        self.scale3 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            BatchNorm2d(inplanes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
        )
        # Branch 0: full-resolution 1x1 projection (no pooling).
        self.scale0 = nn.Sequential(
            BatchNorm2d(inplanes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
        )
        # 3x3 refinement convs applied after each hierarchical fusion step.
        self.process1 = nn.Sequential(
            BatchNorm2d(branch_planes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
        )
        self.process2 = nn.Sequential(
            BatchNorm2d(branch_planes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
        )
        self.process3 = nn.Sequential(
            BatchNorm2d(branch_planes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
        )
        # Fuses the concatenated 4-branch pyramid down to `outplanes`.
        self.compression = nn.Sequential(
            BatchNorm2d(branch_planes * 4, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(branch_planes * 4, outplanes, kernel_size=1, bias=False),
        )
        # Residual 1x1 projection of the raw input.
        self.shortcut = nn.Sequential(
            BatchNorm2d(inplanes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=False),
        )

    def forward(self, x):
        """Aggregate multi-scale context from ``x``.

        Args:
            x: input feature map of shape (N, inplanes, H, W).

        Returns:
            Tensor of shape (N, outplanes, H, W).
        """
        width = x.shape[-1]
        height = x.shape[-2]
        x_list = []
        # Branch 0: full-resolution projection.
        x_list.append(self.scale0(x))
        # Each subsequent branch is pooled, projected, upsampled back to the
        # input size, added to the previous branch output, then refined.
        # align_corners=False is the interpolate default; stated explicitly
        # to keep behavior pinned and avoid the per-call UserWarning.
        x_list.append(self.process1(
            F.interpolate(self.scale1(x),
                          size=[height, width],
                          mode='bilinear',
                          align_corners=False) + x_list[0]))
        x_list.append(self.process2(
            F.interpolate(self.scale2(x),
                          size=[height, width],
                          mode='bilinear',
                          align_corners=False) + x_list[1]))
        x_list.append(self.process3(
            F.interpolate(self.scale3(x),
                          size=[height, width],
                          mode='bilinear',
                          align_corners=False) + x_list[2]))
        # Compress the concatenated pyramid and add the residual shortcut.
        out = self.compression(torch.cat(x_list, 1)) + self.shortcut(x)
        return out