blocks.py
from keras.layers import (
    Activation,
    BatchNormalization,
    Concatenate,
    Conv2D,
    Conv2DTranspose,
    UpSampling2D,
)
import numpy as np


def handle_block_names(stage):
    """Return the layer names (conv, bn, relu, upsample) for a decoder stage."""
    conv_name = 'decoder_stage{}_conv'.format(stage)
    bn_name = 'decoder_stage{}_bn'.format(stage)
    relu_name = 'decoder_stage{}_relu'.format(stage)
    up_name = 'decoder_stage{}_upsample'.format(stage)
    return conv_name, bn_name, relu_name, up_name


def ConvRelu(filters, kernel_size, use_batchnorm=False,
             conv_name='conv', bn_name='bn', relu_name='relu'):
    """Conv2D -> optional BatchNormalization -> ReLU, as a reusable closure."""
    def layer(x):
        # The conv bias is redundant when BatchNorm follows it directly.
        x = Conv2D(filters, kernel_size, padding='same', name=conv_name,
                   use_bias=not use_batchnorm)(x)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name)(x)
        x = Activation('relu', name=relu_name)(x)
        return x
    return layer
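

# Usage sketch (added for illustration; not part of the original file). It
# builds a one-layer model with ConvRelu; the input shape (32, 32, 64) and
# the filter count are arbitrary assumptions for the example.
def _demo_conv_relu():
    from keras.layers import Input
    from keras.models import Model
    inp = Input(shape=(32, 32, 64))
    out = ConvRelu(128, (3, 3), use_batchnorm=True)(inp)
    return Model(inp, out)  # output shape: (None, 32, 32, 128)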


def Upsample2D_block(filters, stage, kernel_size=(3, 3), upsample_rate=(2, 2),
                     use_batchnorm=False, skip=None):
    """Decoder block: fixed upsampling, optional skip concat, two ConvRelu."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)
        # Concatenate the matching encoder feature map (U-Net skip connection).
        if skip is not None:
            x = Concatenate()([x, skip])
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1',
                     relu_name=relu_name + '1')(x)
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2',
                     relu_name=relu_name + '2')(x)
        return x
    return layer
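

# Usage sketch (illustration only): one decoder stage with a skip connection.
# The shapes are arbitrary assumptions; in a U-Net-style decoder `skip` would
# be the encoder feature map at the target resolution.
def _demo_upsample_block():
    from keras.layers import Input
    from keras.models import Model
    bottom = Input(shape=(16, 16, 256))  # low-resolution decoder input
    skip = Input(shape=(32, 32, 128))    # encoder features at 2x resolution
    out = Upsample2D_block(128, stage=1, use_batchnorm=True, skip=skip)(bottom)
    return Model([bottom, skip], out)    # output shape: (None, 32, 32, 128)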


def Transpose2D_block(filters, stage, kernel_size=(3, 3), upsample_rate=(2, 2),
                      transpose_kernel_size=(4, 4), use_batchnorm=False, skip=None):
    """Decoder block: learned Conv2DTranspose upsampling, optional skip concat,
    then one ConvRelu."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        x = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                            padding='same', name=up_name,
                            use_bias=not use_batchnorm)(input_tensor)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name + '1')(x)
        x = Activation('relu', name=relu_name + '1')(x)
        if skip is not None:
            x = Concatenate()([x, skip])
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2',
                     relu_name=relu_name + '2')(x)
        return x
    return layer
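

# Illustration only: Transpose2D_block is a drop-in alternative to
# Upsample2D_block that learns its upsampling kernel. The shapes below are
# arbitrary assumptions, mirroring the previous sketch.
def _demo_transpose_block():
    from keras.layers import Input
    from keras.models import Model
    bottom = Input(shape=(16, 16, 256))
    skip = Input(shape=(32, 32, 128))
    out = Transpose2D_block(128, stage=1, use_batchnorm=True, skip=skip)(bottom)
    return Model([bottom, skip], out)    # output shape: (None, 32, 32, 128)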


def bilinear_upsample_weights(factor, number_of_classes):
    """Build a (size, size, classes, classes) kernel that performs bilinear
    upsampling by `factor`, suitable for initializing a Conv2DTranspose."""
    filter_size = factor * 2 - factor % 2
    # Locate the kernel centre (it differs for odd and even filter sizes).
    factor = (filter_size + 1) // 2
    if filter_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:filter_size, :filter_size]
    # Outer product of two 1-D triangular (bilinear) profiles.
    upsample_kernel = ((1 - abs(og[0] - center) / factor) *
                       (1 - abs(og[1] - center) / factor))
    weights = np.zeros((filter_size, filter_size, number_of_classes, number_of_classes),
                       dtype=np.float32)
    # Each class channel is upsampled independently (no cross-class mixing).
    for i in range(number_of_classes):
        weights[:, :, i, i] = upsample_kernel
    return weights
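

# A sketch of the intended use (an assumption based on the standard FCN
# recipe, not shown in the original file): seed a Conv2DTranspose with the
# bilinear kernel so it starts as exact bilinear upsampling and can then be
# fine-tuned. `number_of_classes=21` and `factor=2` are example values.
def _demo_bilinear_init(number_of_classes=21, factor=2):
    from keras.layers import Input
    from keras.models import Model
    filter_size = factor * 2 - factor % 2
    inp = Input(shape=(None, None, number_of_classes))
    upsample = Conv2DTranspose(number_of_classes, (filter_size, filter_size),
                               strides=(factor, factor), padding='same',
                               use_bias=False)
    model = Model(inp, upsample(inp))
    # Replace the random kernel with the bilinear one; shapes match because
    # bilinear_upsample_weights returns (size, size, classes, classes).
    upsample.set_weights([bilinear_upsample_weights(factor, number_of_classes)])
    return model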