squeezenet.py
import keras
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Concatenate, Activation, add
from keras.layers import Convolution2D, MaxPooling2D, Convolution2DTranspose, BatchNormalization, Lambda, Reshape
from L2_Normalization import L2Normalization
import tensorflow

# Input resolution and number of segmentation classes.
img_height = 256
img_width = 512
n_classes = 8


def bilinear_upsample(image_tensor):
    """Resize a feature map back to the full input resolution (bilinear interpolation by default)."""
    upsampled = tensorflow.image.resize(image_tensor, size=(img_height, img_width))
    return upsampled


class squeeze_segNet():
    def __init__(self, n_labels, image_shape):
        self.n_labels = n_labels
        self.image_shape = image_shape

    def fire_module(self, x, filters, name="fire"):
        # SqueezeNet fire module: a 1x1 squeeze convolution feeding parallel 1x1 and 3x3 expand convolutions.
        sq_filters, ex1_filters, ex2_filters = filters
        squeeze = Convolution2D(sq_filters, (1, 1), activation='elu', padding='same', name=name + "_squeeze1x1")(x)
        expand1 = Convolution2D(ex1_filters, (1, 1), activation='elu', padding='same', name=name + "_expand1x1")(squeeze)
        expand2 = Convolution2D(ex2_filters, (3, 3), activation='elu', padding='same', name=name + "_expand3x3")(squeeze)
        x = Concatenate(axis=-1, name=name)([expand1, expand2])
        return x

    def squeeze_net(self, x):
        # SqueezeNet encoder; x_low1/x_low2/x_low3 are kept as skip connections for the decoder's refine blocks.
        x = Convolution2D(64, kernel_size=(3, 3), strides=(2, 2), padding="same", activation="elu", name='conv1')(x)
        x_low1 = x
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool1', padding="same")(x)
        x = self.fire_module(x, (16, 64, 64), name="fire2")
        x = self.fire_module(x, (16, 64, 64), name="fire3")
        x_low2 = x
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool3', padding="same")(x)
        x = self.fire_module(x, (32, 128, 128), name="fire4")
        x = self.fire_module(x, (32, 128, 128), name="fire5")
        x_low3 = x
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool5', padding="same")(x)
        x = self.fire_module(x, (48, 192, 192), name="fire6")
        x = self.fire_module(x, (48, 192, 192), name="fire7")
        x = self.fire_module(x, (64, 256, 256), name="fire8")
        x = self.fire_module(x, (64, 256, 256), name="fire9")
        return x, x_low1, x_low2, x_low3

    def paral_dilat_module(self, x, n_filter=128):
        # Parallel dilated 3x3 convolutions (rates 1, 3, 5, 7) fused by element-wise addition to widen the receptive field.
        x1 = Convolution2D(64, kernel_size=(3, 3), dilation_rate=(1, 1), activation='elu', kernel_initializer='he_normal', padding='same', name='dilat1')(x)
        x2 = Convolution2D(64, kernel_size=(3, 3), dilation_rate=(3, 3), activation='elu', kernel_initializer='he_normal', padding='same', name='dilat2')(x)
        x3 = Convolution2D(64, kernel_size=(3, 3), dilation_rate=(5, 5), activation='elu', kernel_initializer='he_normal', padding='same', name='dilat3')(x)
        x4 = Convolution2D(64, kernel_size=(3, 3), dilation_rate=(7, 7), activation='elu', kernel_initializer='he_normal', padding='same', name='dilat4')(x)
        x_sum = add([x1, x2, x3, x4])
        return x_sum

    def conv_trans_block(self, x, out_filters, name_trans):
        # Transposed-convolution block; the stride-2 middle layer doubles the spatial resolution.
        x = Convolution2DTranspose(64, (1, 1), activation='elu', padding='same', kernel_initializer='he_normal', name=name_trans + '_tran1')(x)
        x = Convolution2DTranspose(64, (3, 3), strides=(2, 2), activation='elu', padding='same', kernel_initializer='he_normal', name=name_trans + '_tran2')(x)
        x = Convolution2DTranspose(out_filters, (1, 1), activation='elu', padding='same', kernel_initializer='he_normal', name=name_trans + '_tran3')(x)
        return x

    def refine_block(self, x, x_low, refine_name):
        # Fuse upsampled decoder features with the matching encoder skip connection.
        x = Convolution2D(64, kernel_size=(3, 3), activation='elu', kernel_initializer='he_normal', padding='same', name=refine_name + '_block1')(x)
        x = BatchNormalization()(x)
        #x = L2Normalization(gamma_init=20, name=refine_name+'l2_norm1')(x)
        x_low = Convolution2D(64, kernel_size=(3, 3), activation='elu', kernel_initializer='he_normal', padding='same', name=refine_name + '_block2')(x_low)
        x_low = BatchNormalization()(x_low)
        #x_low = L2Normalization(gamma_init=20, name=refine_name+'l2_norm2')(x_low)
        print(f'shape of x, x_low: {x.shape, x_low.shape}')
        x_sum1 = add([x, x_low])
        return x_sum1

    def conv_transpose(self, x, x_low1, x_low2, x_low3):
        # Decoder: three upsample-and-refine stages, a final stride-2 transposed convolution, then a per-pixel softmax.
        x = self.conv_trans_block(x, 256, name_trans='tran1')
        x = self.refine_block(x, x_low3, refine_name='refine1')
        x = self.conv_trans_block(x, 128, name_trans='tran2')
        x = self.refine_block(x, x_low2, refine_name='refine2')
        x = self.conv_trans_block(x, 64, name_trans='tran3')
        x = self.refine_block(x, x_low1, refine_name='refine3')
        x = Convolution2DTranspose(8, (3, 3), strides=(2, 2), activation='elu', padding='same', kernel_initializer='he_normal', name='lasttran')(x)
        x = Lambda(bilinear_upsample, name='bilinear_upsample')(x)
        # Flatten the spatial dimensions so the softmax is applied per pixel over the class axis.
        x = Reshape((img_height * img_width, n_classes))(x)
        x = Activation('softmax', name='final_softmax')(x)
        return x

    def init_model(self):
        h, w, d = self.image_shape
        input1 = Input(shape=(h, w, d), name='input')
        output1, x_low1, x_low2, x_low3 = self.squeeze_net(input1)
        #squeeze_net = Model(inputs=input1, outputs=[output1, x_low1, x_low2, x_low3], name='squeez_net')
        #squeeze_net.load_weights(self.pretrain_weights_path, by_name=True)
        #output2, x_low1, x_low2, x_low3 = squeeze_net.output
        output_3 = self.paral_dilat_module(output1)
        result = self.conv_transpose(output_3, x_low1, x_low2, x_low3)
        squeeze_seg = Model(inputs=input1, outputs=result, name='squeeze_seg')
        return squeeze_seg
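

# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# A minimal example of how the class above might be driven, assuming the
# module-level img_height, img_width and n_classes describe the intended input
# resolution and label count; the constructor arguments mirror __init__ above.
if __name__ == "__main__":
    net = squeeze_segNet(n_labels=n_classes, image_shape=(img_height, img_width, 3))
    model = net.init_model()
    model.summary()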