I am trying to build a variational autoencoder with TensorFlow. I started with the simplest possible model. I have the following methods:
def conv_layer(x, w_shape, b_shape, padding='SAME'):
    W = weight_variable(w_shape)
    tf.summary.histogram(W.name, W)
    b = bias_variable(b_shape)
    tf.summary.histogram(b.name, b)
    # A stride of 2 is intended here instead of a max-pool layer; it is set to 1 in this version.
    activations = tf.nn.relu(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding) + b)
    tf.summary.histogram(activations.name, activations)
    return activations
def deconv_layer(x, w_shape, b_shape, padding="SAME"):
    W = weight_variable(w_shape)
    tf.summary.histogram(W.name, W)
    b = bias_variable(b_shape)
    tf.summary.histogram('bias', b)
    x_shape = tf.shape(x)
    out_shape = tf.stack([x_shape[0], x_shape[1], x_shape[2], w_shape[2]])
    # The stride here is meant to match the stride used in the conv layers.
    transposed_activations = tf.nn.conv2d_transpose(x, W, out_shape, [1, 1, 1, 1], padding=padding) + b
    tf.summary.histogram(transposed_activations.name, transposed_activations)
    return transposed_activations
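The helpers weight_variable and bias_variable are not shown above; a minimal sketch of such helpers, assuming the usual truncated-normal / small-constant initializers from the TensorFlow tutorials, would be:

import tensorflow as tf

def weight_variable(shape):
    # Truncated-normal initialization with a small stddev.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    # Small positive constant so the ReLU units start active.
    return tf.Variable(tf.constant(0.1, shape=shape))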
The model of the whole network is as follows:
with tf.name_scope('conv1'):
    conv1 = conv_layer(image, [3, 3, 3, 32], [32])
with tf.name_scope('conv2'):
    conv2 = conv_layer(conv1, [3, 3, 32, 64], [64])
with tf.name_scope('conv3'):
    conv3 = conv_layer(conv2, [3, 3, 64, 128], [128])
with tf.name_scope('conv4'):
    conv4 = conv_layer(conv3, [3, 3, 128, 256], [256])
with tf.name_scope('z'):
    z = conv_layer(conv4, [3, 3, 256, 256], [256])
with tf.name_scope('deconv4'):
    deconv4 = deconv_layer(z, [3, 3, 128, 256], [128])
with tf.name_scope('deconv3'):
    deconv3 = deconv_layer(deconv4, [3, 3, 64, 128], [64])
with tf.name_scope('deconv2'):
    deconv2 = deconv_layer(deconv3, [3, 3, 32, 64], [32])
with tf.name_scope('deconv1'):
    deconv_image = deconv_layer(deconv2, [3, 3, 3, 32], [3])
I get my images from a FIFOQueue and feed them into this model; my images are 112 x 112 x 3 (a rough sketch of the input pipeline is at the end of this post). My problem is that when I change the strides in the conv and deconv layers from [1, 1, 1, 1] to [1, 2, 2, 1], I get the following error:
InvalidArgumentError (see above for traceback): Conv2DSlowBackpropInput: Size of out_backprop doesn't match computed: actual = 4, computed = 2
[[Node: deconv4/conv2d_transpose = Conv2DBackpropInput[T=DT_FLOAT, data_format="NHWC", padding="SAME", strides=[1, 2, 2, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/gpu:0"](deconv4/stack, deconv4/Variable/read, z/Relu)]]
[[Node: deconv1/add/_17 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_85_deconv1/add", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
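For reference, a quick sketch of the shape arithmetic that seems to be behind "actual = 4, computed = 2" (assuming SAME padding, the 112 x 112 input, and stride 2 in every layer):

# With SAME padding and stride 2, each conv layer halves the spatial size, rounding up:
# 112 -> 56 -> 28 -> 14 -> 7 -> 4, so z ends up 4 x 4.
size = 112
for name in ['conv1', 'conv2', 'conv3', 'conv4', 'z']:
    size = (size + 1) // 2   # ceil(size / 2)
    print(name, size)        # conv1 56, conv2 28, conv3 14, conv4 7, z 4

# deconv4 builds out_shape from z's own spatial size, i.e. [batch, 4, 4, 128].
# With stride 2, conv2d_transpose expects its input to have spatial size ceil(4 / 2) = 2,
# but z is 4 x 4 -- which appears to match "actual = 4, computed = 2".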
PS: I know I am missing an activation function in the deconv layers, but I guess that is unrelated to the error I am getting. Any help is much appreciated!!
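For completeness, the input pipeline looks roughly like the sketch below (a minimal stand-in, assuming a tf.FIFOQueue of decoded 112 x 112 x 3 float images; the random enqueue op and the batch size of 8 are placeholders, not my real loading code):

queue = tf.FIFOQueue(capacity=64, dtypes=[tf.float32], shapes=[[112, 112, 3]])
enqueue_op = queue.enqueue(tf.random_uniform([112, 112, 3]))  # stand-in for real decoded images
tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue_op] * 4))
image = queue.dequeue_many(8)  # shape [8, 112, 112, 3], fed to conv1 above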