
I take an image of size [256 256 3] as input and want to output an image of the same size, but I get the error shown below. I have tried changing the shapes, filters, and strides with no effect. Any other way of achieving the same thing, with both input and output being images, would also be appreciated, as would a link to something similar. Here is my code:

import tensorflow as tf 
import tflearn
import os

import numpy as np
from tflearn.layers.conv import conv_2d, max_pool_2d,conv_2d_transpose
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import loaddata

LR = 1e-3
MODEL_NAME = 'deblurr-{}-{}.model'.format(LR, '2conv-basic')
IMG_SIZE = 256
strides = [1,2,2,1]


convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], dtype=tf.float32,
                     name='input')
print ("convnet :" , convnet.shape)


convnet = conv_2d(convnet, 6, 5,strides=strides, activation='LeakyReLU')
print ("convnet :" , convnet.shape)

convnet = conv_2d(convnet,12, 5,strides=strides, activation='LeakyReLU')
print ("convnet :" , convnet.shape)

convnet = conv_2d(convnet, 24, 5,strides=strides, activation='LeakyReLU')
print ("convnet :" , convnet.shape)


convnet = conv_2d(convnet, 48, 5,strides=strides, activation='LeakyReLU')
print ("convnet :" , convnet.shape)

convnet = conv_2d(convnet, 24, 5,strides=strides, activation='LeakyReLU')
print ("convnet :" , convnet.shape)

convnet = conv_2d_transpose(convnet, 12, 5, output_shape=[64,64,12],
                            strides=strides, activation='tanh')
print ("convnet :" , convnet.shape)
convnet = dropout(convnet, 0.8)
print ("convnet :" , convnet.shape)

convnet = conv_2d_transpose(convnet, 6, 5, output_shape=[128,128,6],
                            strides=strides, activation='tanh')
print ("convnet :" , convnet.shape)
convnet = dropout(convnet, 0.8)
print ("convnet :" , convnet.shape)

convnet = conv_2d_transpose(convnet, 3, 5, output_shape=[256,256,3],
                            strides=strides, activation='tanh')
print ("convnet :" , convnet.shape)
convnet = dropout(convnet, 0.8)
print ("convnet :" , convnet.shape)

convnet = regression(convnet, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', name='targets')
print ("convnet :" , convnet.shape)

model = tflearn.models.dnn.DNN(convnet)

if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')

y_train, x_train = loaddata.load_data(data_type='train')


X = x_train
Y = y_train

y_test, x_test = loaddata.load_data(data_type='test')

test_x = y_test
test_y = x_test

model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          validation_set=({'input': test_x}, {'targets': test_y}),
          batch_size=2, snapshot_step=500, show_metric=True, run_id=MODEL_NAME)

The error is as follows:

ValueError                                Traceback (most recent call last)
C:\Users\USER\Anaconda3\lib\site-packages\tensorflow\python\framework\tensor_shape.py in merge_with(self, other)
    562         for i, dim in enumerate(self._dims):
--> 563           new_dims.append(dim.merge_with(other[i]))
    564         return TensorShape(new_dims)

C:\Users\USER\Anaconda3\lib\site-packages\tensorflow\python\framework\tensor_shape.py in merge_with(self, other)
    137     other = as_dimension(other)
--> 138     self.assert_is_compatible_with(other)
    139     if self._value is None:

C:\Users\USER\Anaconda3\lib\site-packages\tensorflow\python\framework\tensor_shape.py in assert_is_compatible_with(self, other)
    110       raise ValueError("Dimensions %s and %s are not compatible" % (self,
--> 111                                                                     other))
    112

ValueError: Dimensions 32 and 8 are not compatible

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
E:\extra\notes\New folder\source\models.py in <module>()
     53 print ("convnet :" , convnet.shape)
     54
---> 55 model = tflearn.models.dnn.DNN(convnet)
     56
     57 if os.path.exists('{}.meta'.format(MODEL_NAME)):

C:\Users\USER\Anaconda3\lib\site-packages\tflearn\models\dnn.py in __init__(self, network, clip_gradients, tensorboard_verbose, tensorboard_dir, checkpoint_path, best_checkpoint_path, max_checkpoints, session, best_val_accuracy)
     63                                max_checkpoints=max_checkpoints,
     64                                session=session,
---> 65                                best_val_accuracy=best_val_accuracy)
     66         self.session = self.trainer.session
     67

C:\Users\USER\Anaconda3\lib\site-packages\tflearn\helpers\trainer.py in __init__(self, train_ops, graph, clip_gradients, tensorboard_dir, tensorboard_verbose, checkpoint_path, best_checkpoint_path, max_checkpoints, keep_checkpoint_every_n_hours, random_seed, session, best_val_accuracy)
    129                 train_op.initialize_training_ops(i, self.session,
    130                                                  tensorboard_verbose,
--> 131                                                  clip_gradients)
    132
    133             # Saver for saving a model

C:\Users\USER\Anaconda3\lib\site-packages\tflearn\helpers\trainer.py in initialize_training_ops(self, i, session, tensorboard_verbose, clip_gradients)
    695             # Compute gradients operations
    696             with tf.control_dependencies([loss_avg_op, acc_avg_op]):
--> 697                 self.grad = tf.gradients(total_loss, self.train_vars)
    698                 if clip_gradients > 0.0:
    699                     self.grad, self.grad_norm = \

C:\Users\USER\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_impl.py in gradients(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method)
    560             if (isinstance(in_grad, ops.Tensor) and
    561                 t_in.dtype != dtypes.resource):
--> 562               in_grad.set_shape(t_in.get_shape())
    563             _SetGrad(grads, t_in, in_grad)
    564         if loop_state:

C:\Users\USER\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in set_shape(self, shape)
    376         this tensor.
    377     """
--> 378     self._shape = self._shape.merge_with(shape)
    379
    380   @property

C:\Users\USER\Anaconda3\lib\site-packages\tensorflow\python\framework\tensor_shape.py in merge_with(self, other)
    564         return TensorShape(new_dims)
    565       except ValueError:
--> 566         raise ValueError("Shapes %s and %s are not compatible" % (self, other))
    567
    568   def concatenate(self, other):

ValueError: Shapes (?, 32, 32, 24) and (?, 8, 8, 24) are not compatible

1 Answer


If you look at the shape outputs of your conv layers:

convnet1: (?, 256, 256, 3)
convnet2: (?, 128, 128, 6)
convnet3: (?, 64, 64, 12)
convnet4: (?, 32, 32, 24)
convnet5: (?, 16, 16, 48)
convnet6: (?, 8, 8, 24)
convnet7: (?, 64, 64, 12)
convnet8: (?, 128, 128, 6)
convnet9: (?, 256, 256, 3)
convnet10: (?, 256, 256, 3)

From convnet6 to convnet7 you want to upscale the spatial dimensions by a factor of 8, but your code only upscales by a factor of 2 through the strides:

strides = [1,2,2,1]
convnet = conv_2d_transpose(convnet, 12, 5, output_shape=[64,64,12],
                            strides=strides, activation='tanh')
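
As a quick check on where the two shapes in the traceback come from (with TFLearn's default 'same' padding, a stride-s transposed convolution maps spatial size n to s*n, so an output of size m implies an input of size m/s):

8 * 2  = 16   # what a stride-2 transpose actually produces from the (?, 8, 8, 24) tensor
64 / 2 = 32   # the input size that output_shape=[64,64,12] with stride 2 expects

which is exactly the "(?, 32, 32, 24) and (?, 8, 8, 24) are not compatible" mismatch reported at the bottom of the traceback.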

That is why the gradient shape computed during backpropagation is not compatible with this layer. However, if you simply change this line to:

convnet = conv_2d_transpose(convnet, 12, 5, output_shape=[64,64,12],
                            strides=[1,8,8,1], activation='tanh')

then your code should work.
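
Alternatively (this is not part of the original answer, just a sketch), you can keep the upsampling factor at 2 everywhere and mirror each stride-2 conv layer with a stride-2 transpose layer, so that every output_shape is exactly twice its input and no single layer has to jump by a factor of 8. A minimal sketch, reusing the question's TFLearn layers and activation strings; the filter counts are illustrative:

import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, conv_2d_transpose
from tflearn.layers.core import input_data
from tflearn.layers.estimator import regression

strides = [1, 2, 2, 1]

# Encoder: five stride-2 convolutions, 256 -> 128 -> 64 -> 32 -> 16 -> 8
net = input_data(shape=[None, 256, 256, 3], dtype=tf.float32, name='input')
net = conv_2d(net,  6, 5, strides=strides, activation='LeakyReLU')   # (?, 128, 128, 6)
net = conv_2d(net, 12, 5, strides=strides, activation='LeakyReLU')   # (?, 64, 64, 12)
net = conv_2d(net, 24, 5, strides=strides, activation='LeakyReLU')   # (?, 32, 32, 24)
net = conv_2d(net, 48, 5, strides=strides, activation='LeakyReLU')   # (?, 16, 16, 48)
net = conv_2d(net, 24, 5, strides=strides, activation='LeakyReLU')   # (?, 8, 8, 24)

# Decoder: five stride-2 transposed convolutions, 8 -> 16 -> 32 -> 64 -> 128 -> 256,
# so each output_shape matches what the stride can actually produce
net = conv_2d_transpose(net, 24, 5, output_shape=[16, 16, 24],  strides=strides, activation='LeakyReLU')
net = conv_2d_transpose(net, 12, 5, output_shape=[32, 32, 12],  strides=strides, activation='LeakyReLU')
net = conv_2d_transpose(net, 12, 5, output_shape=[64, 64, 12],  strides=strides, activation='LeakyReLU')
net = conv_2d_transpose(net,  6, 5, output_shape=[128, 128, 6], strides=strides, activation='LeakyReLU')
net = conv_2d_transpose(net,  3, 5, output_shape=[256, 256, 3], strides=strides, activation='tanh')

net = regression(net, optimizer='adam', learning_rate=1e-3,
                 loss='mean_square', name='targets')
model = tflearn.DNN(net)

For an image-to-image target, a regression loss such as 'mean_square' is usually a better fit than 'categorical_crossentropy', although that is a separate issue from the shape error.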

answered 2017-12-21 at 08:54:22