Here is my discriminator architecture:
def build_discriminator(img_shape, embedding_shape):
    model1 = Sequential()
    model1.add(Conv2D(32, kernel_size=5, strides=2, input_shape=img_shape, padding="same"))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(48, kernel_size=5, strides=2, padding="same"))
    #model.add(ZeroPadding2D(padding=((0,1),(0,1))))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(64, kernel_size=5, strides=2, padding="same"))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(128, kernel_size=5, strides=2, padding="same"))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(256, kernel_size=5, strides=2, padding="same"))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Flatten())
    model1.add(Dense(200))

    model2 = Sequential()
    model2.add(Dense(50, input_shape=embedding_shape))
    model2.add(Dense(100))
    model2.add(Dense(200))
    model2.add(Flatten())

    merged_model = Sequential()
    merged_model.add(Merge([model1, model2], mode='concat'))
    merged_model.add(Dense(1, activation='sigmoid', name='output_layer'))
    #merged_model.compile(loss='binary_crossentropy', optimizer='adam',
    #                     metrics=['accuracy'])
    #model1.add(Dense(1, activation='sigmoid'))
    merged_model.summary()
    merged_model.input_shape

    img = Input(shape=img_shape)
    emb = Input(shape=embedding_shape)
    validity = merged_model([img, emb])
    return Model([img, emb], validity)
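(Side note: the Merge layer above is the old Keras 1.x API and no longer exists in Keras 2, so in case that matters here, this is a minimal sketch of the same two-branch head written with the functional API and Concatenate; merge_branches is just a hypothetical helper name, not part of my code:

from keras.layers import Input, Dense, Concatenate
from keras.models import Model

def merge_branches(model1, model2, img_shape, embedding_shape):
    # feed each branch its own Input, concatenate the two feature
    # vectors, then apply the sigmoid real/fake head
    img = Input(shape=img_shape)
    emb = Input(shape=embedding_shape)
    merged = Concatenate()([model1(img), model2(emb)])
    validity = Dense(1, activation='sigmoid', name='output_layer')(merged)
    return Model([img, emb], validity)
)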
Here is the generator architecture:
def build_generator(latent_dim=484):
    model = Sequential()
    model.add(Dense(624 * 2 * 2, activation="relu", input_dim=latent_dim))
    model.add(Reshape((2, 2, 624)))
    model.add(UpSampling2D())
    model.add(Conv2D(512, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #4x4x512
    model.add(Conv2D(256, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #8x8x256
    model.add(Conv2D(128, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #16x16x128
    model.add(Conv2D(64, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #32x32x64
    model.add(Conv2D(32, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #64x64x32
    model.add(Conv2D(3, kernel_size=5, padding="same"))
    model.add(Activation("tanh"))
    #128x128x3
    noise = Input(shape=(latent_dim,))
    img = model(noise)
    return Model(noise, img)
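As a quick sanity check that the upsampling chain really ends at 128x128x3 (2 → 4 → 8 → 16 → 32 → 64 → 128 across the six UpSampling2D layers), this is roughly what I run (a sketch, assuming numpy is imported as np):

g = build_generator(latent_dim=484)
sample = g.predict(np.random.normal(0, 1, (2, 484)))
print(sample.shape)  # expected: (2, 128, 128, 3)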
Here is how I build the GAN network:
optimizer = Adam(0.0004, 0.5)
discriminator = build_discriminator((128, 128, 3), (1, 128, 3))
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
# Build the generator
generator = build_generator()
# The generator takes noise as input and generates imgs
z = Input(shape=(100+384,))
img = generator(z)
# For the combined model we will only train the generator
discriminator.trainable = False
temp=Input(shape=(1,128,3))
# The discriminator takes generated images as input and determines validity
valid = discriminator([img,temp])
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
combined = Model(z, valid)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)
The discriminator consists of two sub-models: one takes an image of shape 128x128x3 as input, the other takes an embedding of shape 1x128x3, and the two are then merged. The generator model just takes noise and produces a 128x128x3 image. So at the line combined = Model(z, valid) I get the following error:
RuntimeError: Graph disconnected: cannot obtain value for tensor Tensor("input_5:0", shape=(?, 1, 128, 3), dtype=float32) at layer "input_5". The following previous layers were accessed without issue: ['input_4', 'model_2']
I think this is because the discriminator cannot find the embedding input, but I am feeding it a tensor of shape (1,128,3), just like the noise is fed into the generator model. Can anyone tell me where I went wrong?
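My own guess so far (unverified) is that valid depends on both z and temp, but only z is declared as an input of combined, so Keras cannot trace a path from the model's inputs to temp. A sketch of what I believe the connected version would look like:

# my guess, not verified: temp must also be listed as an input of the
# combined model, because valid depends on it through the discriminator
combined = Model([z, temp], valid)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)
# training would then pass both arrays:
# g_loss = combined.train_on_batch([latent_code, vects], valid)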
After everything is set up, this is how I generate images from the noise and embedding vectors merged together; the discriminator then takes the images and vectors to identify fakes:
#texts has embedding vectors
pics = np.array(pics)  #images
noise = np.random.normal(0, 1, (batch_size, 100))
j = 0
latent_code = []
for j in range(len(texts)):  #appending embedding at the end of noise
    n = np.append(noise[j], texts[j])
    n = n.tolist()
    latent_code.append(n)
latent_code = np.array(latent_code)
gen_imgs = generator.predict(latent_code)  #gen making fakes
j = 0
vects = []
for im in gen_imgs:
    t = np.array(texts[j])
    t = np.reshape(t, [128, 3])
    t = np.expand_dims(t, axis=0)
    vects.append(t)
    j += 1
vects = np.array(vects)  #vector of shape (?, 1, 128, 3)
#disc marking fakes and reals
d_loss_real = discriminator.train_on_batch([pics, vects], valid)
d_loss_fake = discriminator.train_on_batch([gen_imgs, vects], fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
g_loss = combined.train_on_batch(latent_code, valid)
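(The valid and fake targets used above are defined earlier in my training loop and not shown; they are just the usual label arrays, along the lines of:

valid = np.ones((batch_size, 1))   # labels for real images
fake = np.zeros((batch_size, 1))   # labels for generated images
)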