0

我计划使用带有 tensorflow 2.0+ 的 DCGAN 创建新图片。但是模型的生成器和判别器的损失值保持不变。我知道有很多方法可以帮助我们找到问题但找不到问题。请帮我看看。

class Generator(Model):
    alpha = 0.5
    # Set layers.
    def __init__(self, out_channel_dim):
        super(Generator, self).__init__()
        self.out_channel_dim = out_channel_dim
        self.fc1 = layers.Dense(4 * 4 * 512)
        self.bn1 = layers.BatchNormalization()
        self.conv2tr1 = layers.Conv2DTranspose(256, 5, strides=2, padding='VALID')
        self.bn2 = layers.BatchNormalization()
        self.conv2tr3 = layers.Conv2DTranspose(self.out_channel_dim, 5, strides=2, padding='SAME')

    # Set forward pass.
    def __call__(self, x, is_training=False):
        x = self.fc1(x)
        # Reshape to a 4-D array of images: (batch, height, width, channels)
        # New shape: (batch, 4, 4, 512)
        x = tf.reshape(x, shape=[-1, 4, 4, 512])
        x = self.bn1(x, training=is_training)
        x = layers.Maximum()([self.alpha*x,x])
        # First Deconvolution, image shape: (batch, 11, 11, 256)
        x = self.conv2tr1(x)
        x = self.bn2(x, training=is_training)
        x = layers.Maximum()([self.alpha*x,x])
        # Third Deconvolution, image shape:(batch, 44, 44, out_channel_dim)
        x = self.conv2tr3(x)
        x = tf.nn.tanh(x)
        return x

class Discriminator(Model):
    alpha = 0.2
    # Set layers.
    def __init__(self, out_channel_dim):
        super(Discriminator, self).__init__()
        self.out_channel_dim = out_channel_dim
        self.conv1 = layers.Conv2D(64, 5, strides=2, padding='SAME')
        self.bn1 = layers.BatchNormalization()
        self.flatten = layers.Flatten()
        self.cf1 = layers.Dense(1)

    # Set forward pass.
    def call(self, x, is_training=False):
        # First Convolution, image shape: (batch, 22, 22, 64)
        x = self.conv1(x)
        x = self.bn1(x, training=is_training)
        x = layers.Maximum()([self.alpha*x, x])

        # Fully connected layers
        x = self.flatten(x)
        x = self.cf1(x)
        return tf.math.log_sigmoid(x)

generator = Generator(out_channel_dim)
discriminator = Discriminator(out_channel_dim)

# Losses.
def generator_loss(reconstructed_image):
    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=reconstructed_image, labels=tf.ones_like(reconstructed_image)))
    return gen_loss

def discriminator_loss(disc_fake, disc_real):
    disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=disc_real, labels=tf.ones_like(disc_real)))
    disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=disc_fake, labels=tf.zeros_like(disc_real)))
    return disc_loss_real + disc_loss_fake

# Optimizers.
lr_generator = 0.0002
lr_discriminator = 0.0002
optimizer_gen = tf.optimizers.Adam(learning_rate=lr_generator)#, beta_1=0.5, beta_2=0.999)
optimizer_disc = tf.optimizers.Adam(learning_rate=lr_discriminator)#, beta_1=0.5, beta_2=0.999)

# Optimization process. Inputs: real image and noise.
def run_optimization(real_images):

    # Generate noise.
    noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)
    
    with tf.GradientTape() as g:
            
        fake_images = generator(noise, is_training=True)
        disc_fake = discriminator(fake_images, is_training=True)
        disc_real = discriminator(real_images, is_training=True)

        disc_loss = discriminator_loss(disc_fake, disc_real)
            
    # Training Variables for each optimizer
    gradients_disc = g.gradient(disc_loss,  discriminator.trainable_variables)
    optimizer_disc.apply_gradients(zip(gradients_disc,  discriminator.trainable_variables))
    
    # Generate noise.
    noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)
    
    with tf.GradientTape() as g:
            
        fake_images = generator(noise, is_training=True)
        disc_fake = discriminator(fake_images, is_training=True)

        gen_loss = generator_loss(disc_fake)
            
    gradients_gen = g.gradient(gen_loss, generator.trainable_variables)
    optimizer_gen.apply_gradients(zip(gradients_gen, generator.trainable_variables))
    
    return gen_loss, disc_loss

# Run training for the given number of steps.
step = 0
for iepoch in range(epoch_count):
    for batch_image in get_batches(batch_size):
        # the shape of batch_image is [batch, height, width]
        if step == 0:
            # Generate noise.
            noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)
            gen_loss = generator_loss(discriminator(generator(noise)))
            disc_loss = discriminator_loss(discriminator(batch_image), discriminator(generator(noise)))
            print("initial: gen_loss: %f, disc_loss: %f" % (gen_loss, disc_loss))
            continue

        # Run the optimization.
        gen_loss, disc_loss = run_optimization(batch_image)
        step += 1
        print("step: %i, gen_loss: %f, disc_loss: %f" % (step, gen_loss, disc_loss))

但是,损失值保持不变。 从图中我们可以清楚地看到案例

4

0 回答 0