Inception-ResNet-V2

Background

  Inception-ResNet-V2 was introduced together with Inception-V4 and Inception-ResNet-V1 in the same paper, published at AAAI in 2017; the three networks share largely the same overall structure. It is a deep learning network built by combining the strengths of Inception-V3 and ResNet.

Features of Inception-ResNet-V2

  On top of Inception-V3, residual connections are added (see the minimal sketch after this list)
  A Stem block is added at the network input, replacing the plain convolution stack used in Inception-V3
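
To make the first point concrete, here is a minimal runnable sketch of the residual pattern that the Inception-ResNet blocks below follow; the branch widths and the 35x35x384 input shape are illustrative, not the exact block definitions:

import tensorflow.keras as keras

# Two parallel branches are concatenated, projected back to the input
# channel count with a 1x1 convolution, then added to the shortcut.
inputs = keras.layers.Input((35, 35, 384))
branch_1 = keras.layers.Conv2D(32, (1, 1), padding='same')(inputs)
branch_2 = keras.layers.Conv2D(32, (3, 3), padding='same')(inputs)
mixed = keras.layers.Concatenate()([branch_1, branch_2])
projected = keras.layers.Conv2D(384, (1, 1), padding='same')(mixed)
outputs = keras.layers.ReLU()(keras.layers.Add()([projected, inputs]))
sketch = keras.Model(inputs, outputs)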

Spatial Separable Convolution

  Spatial separable convolution factorizes a 3x3 convolution into a 3x1 convolution followed by a 1x3 convolution, and a 7x7 convolution into a 7x1 convolution followed by a 1x7 convolution. Its main benefit is a large reduction in the number of parameters. For example, mapping a 64x64x256 feature map to a 64x64x256 output with an ordinary 7x7 convolution costs 256x(256x7x7+1) = 3,211,520 parameters, while the spatially separable version costs 2x256x(256x7x1+1) = 918,016, roughly a 3.5x reduction.
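
These figures are easy to verify in Keras; a minimal sketch, assuming the same 64x64x256 feature map as above:

import tensorflow.keras as keras

# Ordinary 7x7 convolution.
inputs = keras.layers.Input((64, 64, 256))
full = keras.Model(inputs, keras.layers.Conv2D(256, (7, 7), padding='same')(inputs))
print(full.count_params())  # 3211520 = 256 * (256*7*7 + 1)

# Spatially separable version: 7x1 followed by 1x7.
x = keras.layers.Conv2D(256, (7, 1), padding='same')(inputs)
x = keras.layers.Conv2D(256, (1, 7), padding='same')(x)
separable = keras.Model(inputs, x)
print(separable.count_params())  # 918016 = 2 * 256 * (256*7 + 1)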

Inception-ResNet-V2 Architecture Diagram

The Inception-ResNet-V2 network structure is quite large; it is best saved locally and viewed zoomed in.
[Figure: Inception-ResNet-V2 network architecture]

TensorFlow 2.0 Implementation

from functools import reduce

import tensorflow.keras as keras


def compose(*funcs):
    # Compose layer callables left to right: compose(f, g)(x) == g(f(x)).
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    else:
        raise ValueError('Composition of empty sequence not supported.')


class Conv_Bn_Relu(keras.layers.Layer):
    # Basic unit of the network: Conv2D -> BatchNormalization -> ReLU.
    def __init__(self, filters, kernel_size, strides, padding, name):
        super(Conv_Bn_Relu, self).__init__(name=name)
        self.conv = keras.layers.Conv2D(filters, kernel_size, strides, padding)
        self.bn = keras.layers.BatchNormalization()
        self.relu = keras.layers.ReLU()

    def call(self, inputs, **kwargs):
        conv = self.conv(inputs)
        bn = self.bn(conv)
        output = self.relu(bn)

        return output


def reduction_A(x, name):
    # 35x35x384 -> 17x17x1152: three parallel branches concatenated on channels.
    x_1 = keras.layers.MaxPooling2D((3, 3), (2, 2), name='{}_part1_maxpool'.format(name))(x)

    x_2 = Conv_Bn_Relu(384, (3, 3), (2, 2), padding='valid', name='{}_part2_conv_bn_relu1'.format(name))(x)

    x_3 = compose(Conv_Bn_Relu(256, (1, 1), (1, 1), padding='same', name='{}_part3_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(256, (3, 3), (1, 1), padding='same', name='{}_part3_conv_bn_relu2'.format(name)),
                  Conv_Bn_Relu(384, (3, 3), (2, 2), padding='valid', name='{}_part3_conv_bn_relu3'.format(name)))(x)

    x = keras.layers.Concatenate(name='{}_concatenate'.format(name))([x_1, x_2, x_3])

    return x


def reduction_B(x, name):
    # 17x17x1152 -> 8x8x2144: four parallel branches concatenated on channels.
    x_1 = keras.layers.MaxPooling2D((3, 3), (2, 2), name='{}_part1_maxpool'.format(name))(x)

    x_2 = compose(Conv_Bn_Relu(256, (1, 1), (1, 1), padding='same', name='{}_part2_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(384, (3, 3), (2, 2), padding='valid', name='{}_part2_conv_bn_relu2'.format(name)))(x)

    x_3 = compose(Conv_Bn_Relu(256, (1, 1), (1, 1), padding='same', name='{}_part3_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(288, (3, 3), (2, 2), padding='valid', name='{}_part3_conv_bn_relu2'.format(name)))(x)

    x_4 = compose(Conv_Bn_Relu(256, (1, 1), (1, 1), padding='same', name='{}_part4_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(288, (3, 3), (1, 1), padding='same', name='{}_part4_conv_bn_relu2'.format(name)),
                  Conv_Bn_Relu(320, (3, 3), (2, 2), padding='valid', name='{}_part4_conv_bn_relu3'.format(name)))(x)

    x = keras.layers.Concatenate(name='{}_concatenate'.format(name))([x_1, x_2, x_3, x_4])

    return x


def stem(x, name):
    # Stem block: 299x299x3 -> 35x35x384, replacing Inception-V3's plain conv stack.
    x = compose(Conv_Bn_Relu(32, (3, 3), (2, 2), padding='valid', name='{}_conv_bn_relu1'.format(name)),
                Conv_Bn_Relu(32, (3, 3), (1, 1), padding='valid', name='{}_conv_bn_relu2'.format(name)),
                Conv_Bn_Relu(64, (3, 3), (1, 1), padding='same', name='{}_conv_bn_relu3'.format(name)))(x)

    x_1 = keras.layers.MaxPool2D((3, 3), (2, 2), name='{}_4_part1_maxpool'.format(name))(x)

    x_2 = Conv_Bn_Relu(96, (3, 3), (2, 2), padding='valid', name='{}_4_part2_conv_bn_relu1'.format(name))(x)

    x = keras.layers.Concatenate(name='{}_concatenate1'.format(name))([x_1, x_2])

    x_1 = compose(Conv_Bn_Relu(64, (1, 1), (1, 1), padding='same', name='{}_5_part1_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(96, (3, 3), (1, 1), padding='valid', name='{}_5_part1_conv_bn_relu2'.format(name)))(x)

    # The 7x7 convolution is factorized into 1x7 and 7x1 (spatial separable convolution).
    x_2 = compose(Conv_Bn_Relu(64, (1, 1), (1, 1), padding='same', name='{}_5_part2_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(64, (1, 7), (1, 1), padding='same', name='{}_5_part2_conv_bn_relu2'.format(name)),
                  Conv_Bn_Relu(64, (7, 1), (1, 1), padding='same', name='{}_5_part2_conv_bn_relu3'.format(name)),
                  Conv_Bn_Relu(96, (3, 3), (1, 1), padding='valid', name='{}_5_part2_conv_bn_relu4'.format(name)))(x)

    x = keras.layers.Concatenate(name='{}_concatenate2'.format(name))([x_1, x_2])

    x_1 = keras.layers.MaxPool2D((3, 3), (2, 2), name='{}_6_part1_maxpool'.format(name))(x)

    x_2 = Conv_Bn_Relu(192, (3, 3), (2, 2), padding='valid', name='{}_6_part2_conv_bn_relu1'.format(name))(x)

    x = keras.layers.Concatenate(name='{}_concatenate3'.format(name))([x_1, x_2])

    return x


def inception_resnet_A(x, name):
    # Residual Inception block at 35x35: the branches are concatenated, projected
    # back to 384 channels with a 1x1 convolution, then added to the shortcut.
    shortcut = x

    x_1 = Conv_Bn_Relu(32, (1, 1), (1, 1), padding='same', name='{}_part1_conv_bn_relu1'.format(name))(x)

    x_2 = compose(Conv_Bn_Relu(32, (1, 1), (1, 1), padding='same', name='{}_part2_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(32, (3, 3), (1, 1), padding='same', name='{}_part2_conv_bn_relu2'.format(name)))(x)

    x_3 = compose(Conv_Bn_Relu(32, (1, 1), (1, 1), padding='same', name='{}_part3_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(48, (3, 3), (1, 1), padding='same', name='{}_part3_conv_bn_relu2'.format(name)),
                  Conv_Bn_Relu(64, (3, 3), (1, 1), padding='same', name='{}_part3_conv_bn_relu3'.format(name)))(x)

    x = keras.layers.Concatenate(name='{}_concatenate'.format(name))([x_1, x_2, x_3])

    x = Conv_Bn_Relu(384, (1, 1), (1, 1), padding='same', name='{}_conv_bn_relu1'.format(name))(x)

    x = keras.layers.Add(name='{}_add'.format(name))([x, shortcut])

    x = keras.layers.ReLU(name='{}_relu'.format(name))(x)

    return x


def inception_resnet_B(x, name):
    # Residual Inception block at 17x17; the 1x1 projection restores 1152 channels
    # so the residual add matches the shortcut.
    shortcut = x

    x_1 = Conv_Bn_Relu(192, (1, 1), (1, 1), padding='same', name='{}_part1_conv_bn_relu1'.format(name))(x)

    x_2 = compose(Conv_Bn_Relu(128, (1, 1), (1, 1), padding='same', name='{}_part2_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(160, (1, 7), (1, 1), padding='same', name='{}_part2_conv_bn_relu2'.format(name)),
                  Conv_Bn_Relu(192, (7, 1), (1, 1), padding='same', name='{}_part2_conv_bn_relu3'.format(name)))(x)

    x = keras.layers.Concatenate(name='{}_concatenate'.format(name))([x_1, x_2])

    x = Conv_Bn_Relu(1152, (1, 1), (1, 1), padding='same', name='{}_conv_bn_relu1'.format(name))(x)

    x = keras.layers.Add(name='{}_add'.format(name))([x, shortcut])

    x = keras.layers.ReLU(name='{}_relu'.format(name))(x)

    return x


def inception_resnet_C(x, name):
    # Residual Inception block at 8x8; the 1x1 projection restores 2144 channels.
    shortcut = x

    x_1 = Conv_Bn_Relu(192, (1, 1), (1, 1), padding='same', name='{}_part1_conv_bn_relu1'.format(name))(x)

    x_2 = compose(Conv_Bn_Relu(192, (1, 1), (1, 1), padding='same', name='{}_part2_conv_bn_relu1'.format(name)),
                  Conv_Bn_Relu(224, (1, 7), (1, 1), padding='same', name='{}_part2_conv_bn_relu2'.format(name)),
                  Conv_Bn_Relu(256, (7, 1), (1, 1), padding='same', name='{}_part2_conv_bn_relu3'.format(name)))(x)

    x = keras.layers.Concatenate(name='{}_concatenate'.format(name))([x_1, x_2])

    x = Conv_Bn_Relu(2144, (1, 1), (1, 1), padding='same', name='{}_conv_bn_relu1'.format(name))(x)

    x = keras.layers.Add(name='{}_add'.format(name))([x, shortcut])

    x = keras.layers.ReLU(name='{}_relu'.format(name))(x)

    return x


def inception_resnet_v2(input_shape):
    # Overall layout: stem -> 10 x block A -> reduction A -> 20 x block B
    # -> reduction B -> 10 x block C -> average pooling -> dropout -> softmax.
    input_tensor = keras.layers.Input(input_shape, name='input')
    x = input_tensor
    x = stem(x, 'stem')
    for i in range(10):
        x = inception_resnet_A(x, 'inception_resnet_A_{}'.format(i + 1))
    x = reduction_A(x, 'reduction_A')
    for i in range(20):
        x = inception_resnet_B(x, 'inception_resnet_B_{}'.format(i + 1))
    x = reduction_B(x, 'reduction_B')
    for i in range(10):
        x = inception_resnet_C(x, 'inception_resnet_C_{}'.format(i + 1))
    x = compose(keras.layers.AveragePooling2D((8, 8), (8, 8), name='averagepool'),
                keras.layers.Dropout(0.2, name='dropout'),
                keras.layers.Flatten(name='flatten'),
                keras.layers.Dense(1000, activation='softmax', name='dense'))(x)

    model = keras.Model(input_tensor, x, name='Inception_ResNet_V2')

    return model


if __name__ == '__main__':
    model = inception_resnet_v2(input_shape=(299, 299, 3))
    model.summary()

[Figure: Inception-ResNet-V2 model.summary() output]

Inception-ResNet-V2 Summary

  Inception-ResNet-V2 is a deep learning network that combines the strengths of Inception-V3 and ResNet. As the model summary above shows, it has about 60M parameters; however, because its structure is far more complex than Inception-V3's, it is rarely used in practice as a feature-extraction backbone.
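
The quoted figure can be reproduced from the implementation above; a minimal sketch, assuming the inception_resnet_v2 function defined earlier is in scope (the exact count reflects this re-implementation, not the official model):

model = inception_resnet_v2(input_shape=(299, 299, 3))
print('{:.1f}M parameters'.format(model.count_params() / 1e6))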

------------- End of this article. Thank you for reading. -------------