pix2pix

Background

  pix2pix was published at CVPR in 2017 and performs image-to-image translation (style transfer). Style transfer only entered the spotlight after GANs were proposed; before generative adversarial networks it was very hard to achieve with traditional image-processing algorithms. Today, let's take a look at how it works.

pix2pix

Features of pix2pix

  Like half of a DiscoGAN: it translates in one direction only (here, from style B to style A), with no reverse mapping back
  The network structure also resembles DiscoGAN and uses a UNet architecture
  The generator's loss uses mean absolute error (L1), while the discriminator's loss uses mean squared error
  The weight on the generator's loss is turned up so the network focuses more on the quality of the generated images (see the objective below)
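
For reference, the objective from the pix2pix paper combines a conditional adversarial term with an L1 reconstruction term. In the implementation below, the adversarial term is realized with mean squared error (LSGAN-style) and the reconstruction weight is set to λ = 100 via loss_weights=[1, 100]:

$$G^{*} = \arg\min_{G}\max_{D}\;\mathcal{L}_{cGAN}(G,D) + \lambda\,\mathcal{L}_{L1}(G),\qquad \mathcal{L}_{L1}(G)=\mathbb{E}_{x,y}\big[\lVert y - G(x)\rVert_{1}\big]$$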

pix2pix Architecture Diagrams

(generator architecture diagram)
(discriminator architecture diagram)

TensorFlow 2.0 Implementation

import os
import glob
import numpy as np
import cv2 as cv
from functools import reduce
import tensorflow as tf
import tensorflow.keras as keras


def compose(*funcs):
    # Compose the given callables left to right: compose(f, g)(x) == g(f(x))
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    else:
        raise ValueError('Composition of empty sequence not supported.')


class Conv_Relu_In(keras.layers.Layer):
    # Downsampling block: Conv2D + LeakyReLU, plus BatchNorm when 'bn' appears in the name
    def __init__(self, filters, kernel_size, strides, padding, name):
        super(Conv_Relu_In, self).__init__()
        self._name = name
        self.block = keras.Sequential([keras.layers.Conv2D(filters, kernel_size, strides, padding),
                                       keras.layers.LeakyReLU(0.2)])
        if name.find('bn') != -1:
            self.block.add(keras.layers.BatchNormalization(momentum=0.8))

    def call(self, inputs, **kwargs):
        return self.block(inputs)


class Upsampling_Conv_Relu_In_Concatenate(keras.layers.Layer):
    # Upsampling block: UpSampling2D + Conv2D + BatchNorm, then concatenation
    # with the matching encoder feature map (UNet skip connection)
    def __init__(self, filters, kernel_size, strides, padding, name):
        super(Upsampling_Conv_Relu_In_Concatenate, self).__init__()
        self._name = name
        self.block = keras.Sequential([keras.layers.UpSampling2D((2, 2)),
                                       keras.layers.Conv2D(filters, kernel_size, strides, padding, activation='relu'),
                                       keras.layers.BatchNormalization(momentum=0.8)])
        self.concatenate = keras.layers.Concatenate()

    def call(self, inputs, **kwargs):
        x, shortcut = inputs
        x = self.block(x)
        output = self.concatenate([x, shortcut])

        return output


def generator(input_shape, name):
    # UNet generator: seven stride-2 downsampling blocks (256 -> 2), six
    # upsampling blocks with skip connections, then a final tanh output layer
    input_tensor = keras.layers.Input(input_shape, name='input')
    x = input_tensor

    x1 = Conv_Relu_In(64, (4, 4), (2, 2), 'same', name='conv_leakyrelu1')(x)
    x2 = Conv_Relu_In(128, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn2')(x1)
    x3 = Conv_Relu_In(256, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn3')(x2)
    x4 = Conv_Relu_In(512, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn4')(x3)
    x5 = Conv_Relu_In(512, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn5')(x4)
    x6 = Conv_Relu_In(512, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn6')(x5)
    x7 = Conv_Relu_In(512, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn7')(x6)

    y6 = Upsampling_Conv_Relu_In_Concatenate(512, (4, 4), (1, 1), 'same', name='upsampling_conv_relu_bn_concatenate1')([x7, x6])
    y5 = Upsampling_Conv_Relu_In_Concatenate(512, (4, 4), (1, 1), 'same', name='upsampling_conv_relu_bn_concatenate2')([y6, x5])
    y4 = Upsampling_Conv_Relu_In_Concatenate(512, (4, 4), (1, 1), 'same', name='upsampling_conv_relu_bn_concatenate3')([y5, x4])
    y3 = Upsampling_Conv_Relu_In_Concatenate(256, (4, 4), (1, 1), 'same', name='upsampling_conv_relu_bn_concatenate4')([y4, x3])
    y2 = Upsampling_Conv_Relu_In_Concatenate(128, (4, 4), (1, 1), 'same', name='upsampling_conv_relu_bn_concatenate5')([y3, x2])
    y1 = Upsampling_Conv_Relu_In_Concatenate(64, (4, 4), (1, 1), 'same', name='upsampling_conv_relu_bn_concatenate6')([y2, x1])

    y = compose(keras.layers.UpSampling2D((2, 2), name='upsampling'),
                keras.layers.Conv2D(3, (4, 4), (1, 1), 'same', activation='tanh', name='conv_tanh'))(y1)

    model = keras.Model(input_tensor, y, name=name)

    return model


def discriminator(input_shape, name):
    # PatchGAN discriminator: takes the (image, condition) pair and outputs a
    # 16x16 grid of real/fake scores instead of a single scalar
    input_tensor1 = keras.layers.Input(input_shape, name='input1')
    input_tensor2 = keras.layers.Input(input_shape, name='input2')

    x = keras.layers.Concatenate(name='concatenate')([input_tensor1, input_tensor2])

    x = compose(Conv_Relu_In(64, (4, 4), (2, 2), 'same', name='conv_leakyrelu1'),
                Conv_Relu_In(128, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn2'),
                Conv_Relu_In(256, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn3'),
                Conv_Relu_In(512, (4, 4), (2, 2), 'same', name='conv_leakyrelu_bn4'),
                keras.layers.Conv2D(1, (4, 4), (1, 1), 'same', name='conv'))(x)

    model = keras.Model([input_tensor1, input_tensor2], x, name=name)

    return model


def pix2pix(input_shapeA, input_shapeB, model_g, model_d):
    input_tensorA = keras.layers.Input(input_shapeA, name='input_A')
    input_tensorB = keras.layers.Input(input_shapeB, name='input_B')

    # Generate a style-A image from the style-B input
    fake_A = model_g(input_tensorB)

    # Freeze the discriminator inside the combined model so that training it
    # only updates the generator
    model_d.trainable = False

    conf_A = model_d([fake_A, input_tensorB])

    model = keras.Model([input_tensorA, input_tensorB], [conf_A, fake_A], name='Pix2Pix')

    return model


def read_data(data_path, batch_size):
    # Each edges2shoes image is a 512x256 composite: style B on the left half,
    # style A on the right half
    filename = glob.glob(data_path + '\\*.jpg')
    choose_name = np.random.choice(filename, batch_size)

    image_A, image_B = [], []
    for i in range(batch_size):
        image = cv.imread(choose_name[i]).astype(np.float32)
        image_A.append(image[:, 256:, :])
        image_B.append(image[:, :256, :])

    # Normalize pixel values to [-1, 1] to match the generator's tanh output
    image_A = np.array(image_A) / 127.5 - 1
    image_B = np.array(image_B) / 127.5 - 1

    return image_A, image_B


if __name__ == '__main__':
    batch_size = 2
    epochs = 2000
    tf.random.set_seed(22)
    img_size = (256, 256)
    data_path = r'.\edges2shoes\train'
    save_path = r'.\pix2pix'
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    optimizer = keras.optimizers.Adam(0.0002, 0.5)
    loss = keras.losses.BinaryCrossentropy()  # defined but unused; both compiles below use 'mse'/'mae'

    # Metrics for monitoring only; they do not affect training
    real_dmse = keras.metrics.MeanSquaredError()
    fake_dmse = keras.metrics.MeanSquaredError()
    gmse = keras.metrics.MeanSquaredError()

    # Compile the discriminator first, while it is still trainable
    model_d = discriminator(input_shape=(img_size[0], img_size[1], 3), name='pix2pix-Discriminator')
    model_d.compile(optimizer=optimizer, loss='mse')

    model_g = generator(input_shape=(img_size[0], img_size[1], 3), name='pix2pix-Generator')

    model_g.build(input_shape=(img_size[0], img_size[1], 3))
    model_g.summary()
    keras.utils.plot_model(model_g, 'pix2pix-generator.png', show_shapes=True, show_layer_names=True)

    model_d.build(input_shape=(img_size[0], img_size[1], 3))
    model_d.summary()
    keras.utils.plot_model(model_d, 'pix2pix-discriminator.png', show_shapes=True, show_layer_names=True)

    # Combined model: MSE for the adversarial term, MAE (L1) for reconstruction,
    # with the reconstruction loss weighted 100x
    model = pix2pix(input_shapeA=(img_size[0], img_size[1], 3), input_shapeB=(img_size[0], img_size[1], 3), model_g=model_g, model_d=model_d)
    model.compile(optimizer=optimizer, loss=['mse', 'mae'], loss_weights=[1, 100])

    model.build(input_shape=[(img_size[0], img_size[1], 3), (img_size[0], img_size[1], 3)])
    model.summary()
    keras.utils.plot_model(model, 'pix2pix.png', show_shapes=True, show_layer_names=True)

    for epoch in range(epochs):
        image_A, image_B = read_data(data_path, batch_size)

        fake_A = model_g(image_B)

        # Update the monitoring metrics; targets are 16x16 PatchGAN label maps
        real_dmse(np.ones((batch_size, img_size[0] // 16, img_size[1] // 16, 1)), model_d([image_A, image_B]))
        fake_dmse(np.zeros((batch_size, img_size[0] // 16, img_size[1] // 16, 1)), model_d([fake_A, image_B]))
        gmse(np.ones((batch_size, img_size[0] // 16, img_size[1] // 16, 1)), model([image_A, image_B])[0])

        # Train the discriminator on real pairs (label 1) and fake pairs (label 0)
        real_dloss = model_d.train_on_batch([image_A, image_B], np.ones((batch_size, img_size[0] // 16, img_size[1] // 16, 1)))
        fake_dloss = model_d.train_on_batch([fake_A, image_B], np.zeros((batch_size, img_size[0] // 16, img_size[1] // 16, 1)))

        # Train the generator: fool the discriminator and reconstruct image_A
        gloss = model.train_on_batch([image_A, image_B], [np.ones((batch_size, img_size[0] // 16, img_size[1] // 16, 1)), image_A])

        if epoch % 20 == 0:
            print('epoch = {}, real_dmse = {}, fake_dmse = {}, gmse = {}'.format(epoch, real_dmse.result(), fake_dmse.result(), gmse.result()))
            real_dmse.reset_states()
            fake_dmse.reset_states()
            gmse.reset_states()
            # Save a side-by-side sample: real A | input B | generated A
            image_A, image_B = read_data(data_path, batch_size=1)
            fake_A = ((model_g(image_B).numpy().squeeze() + 1) * 127.5).astype(np.uint8)
            image_A = ((image_A.squeeze() + 1) * 127.5).astype(np.uint8)
            image_B = ((image_B.squeeze() + 1) * 127.5).astype(np.uint8)
            cv.imwrite(save_path + '\\epoch{}.jpg'.format(epoch), np.concatenate([image_A, image_B, fake_A], axis=1))
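
As a quick usage sketch once training has finished: load one 512x256 edges2shoes composite, feed its left half through the generator, and de-normalize the output back to 0-255. The preprocessing mirrors read_data above; the file names here are hypothetical examples, not part of the original script.

# Inference sketch (assumption: run after the training script above, so
# model_g holds trained weights; 'example.jpg' is a hypothetical file name)
img = cv.imread('example.jpg').astype(np.float32)    # 512x256 composite image
edges = img[:, :256, :] / 127.5 - 1                  # left half = style B, scaled to [-1, 1]
fake = model_g(edges[np.newaxis]).numpy().squeeze()  # add batch dim, then drop it
fake = ((fake + 1) * 127.5).astype(np.uint8)         # tanh output back to 0-255
cv.imwrite('fake_A.jpg', fake)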

(pix2pix combined-model diagram, as exported to pix2pix.png by plot_model)

Model Results

(sample outputs: style-A image, style-B image, generated style-A image)

Tips

  1. Normalize the input images to [0, 1] or [-1, 1] first. Network parameters are generally quite small, so normalized inputs keep the computation well scaled and speed up convergence.
  2. Pay attention to the dimension transformations and the common numpy/tensorflow operations used here; otherwise the code can be hard to follow.
  3. You can also add weight checkpointing, learning-rate decay, and early stopping (see the first sketch after this list).
  4. pix2pix is very sensitive to the network structure, the optimizer parameters, and the layers' hyperparameters. When results are poor the cause is hard to track down, which may take considerable hands-on engineering experience.
  5. Create the discriminator first and compile it; this fixes its training configuration. Then, when building the combined model, set the discriminator's trainable attribute to False so it is not updated while the generator trains; this does not affect the previously compiled discriminator itself. You can verify this through the model's _collected_trainable_weights attribute: if it is empty, the model will not be trained; otherwise it will. After compile, this attribute is fixed, so however trainable is changed later, training is unaffected as long as you do not re-compile (see the second sketch after this list).
  6. In the pix2pix test images, to show the model's effect, the first picture is a style-A shoe, the second is a style-B shoe, and the third is a style-A shoe generated from style B. Training here ran for only 2000 iterations with just 2 images per iteration, and the effect of pix2pix is already visible. With a larger dataset, a faster GPU, and longer training, the style-transfer effect will be much more pronounced.
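
A minimal sketch for tip 3: the script above uses a manual train_on_batch loop, so built-in Keras callbacks never fire, and checkpointing plus learning-rate decay have to be done by hand. The helper names and the schedule values below are illustrative assumptions, not part of the original script.

import os
import tensorflow.keras as keras

def decay_lr(optimizer, factor=0.5):
    # Manual learning-rate decay: scale the optimizer's lr in place
    # (keras.backend.get_value/set_value work with tf.keras optimizers in TF 2.0)
    old_lr = keras.backend.get_value(optimizer.lr)
    keras.backend.set_value(optimizer.lr, old_lr * factor)

def save_checkpoint(model, save_path, epoch):
    # Save weights so training can be resumed or the best model rolled back
    model.save_weights(os.path.join(save_path, 'g_epoch{}.h5'.format(epoch)))

# Inside the training loop one might add (illustrative schedule):
#     if epoch > 0 and epoch % 500 == 0:
#         decay_lr(optimizer)                        # halve the learning rate
#         save_checkpoint(model_g, save_path, epoch)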
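And a minimal sketch for tip 5, using toy one-layer models (the names here are illustrative) to demonstrate the compile/trainable interaction under TF 2.0:

import numpy as np
import tensorflow.keras as keras

# Toy "discriminator": compiled while trainable, so its own updates stay enabled
d = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))], name='toy_d')
d.compile(optimizer='adam', loss='mse')

# Freeze d, then build and compile the combined model: only the "generator"
# weights are collected for training
d.trainable = False
g = keras.Sequential([keras.layers.Dense(4, input_shape=(4,))], name='toy_g')
combined = keras.Sequential([g, d])
combined.compile(optimizer='adam', loss='mse')

# The private TF 2.0 attribute from tip 5: holds only g's kernel and bias
print(len(combined._collected_trainable_weights))  # 2

x, y = np.random.rand(8, 4), np.random.rand(8, 1)
w_before = [w.copy() for w in d.get_weights()]
combined.train_on_batch(x, y)                 # updates g only; d stays frozen
unchanged = all((a == b).all() for a, b in zip(w_before, d.get_weights()))
print('d unchanged after combined step:', unchanged)   # True

d.train_on_batch(x, y)                        # d still trains via its own compile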

pix2pix Summary

  pix2pix is an effective style-transfer generative adversarial network. Its network structure and loss functions are almost identical to DiscoGAN's, but DiscoGAN has two generators and two discriminators and can translate between styles A and B in both directions, while pix2pix has only one generator and one discriminator and therefore performs only one-way translation, so its parameter count is also half of DiscoGAN's. As the figure above shows, the pix2pix model has only about 43M parameters. With a large enough dataset you could even generate facial-expression memes. Isn't that fun? Be sure to master it.

------------- End of this article. Thank you for reading. -------------