I installed TensorFlow GPU using the command conda create --name tf_gpu tensorflow-gpu, which installed TensorFlow version 2.1.0. Here is the driver information for the CUDA and cuDNN versions. After running the command nvidia-smi:

After running the command conda list cudnn:
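
In addition, as a sanity check (a minimal sketch, assuming the tf_gpu environment is activated), TensorFlow itself can report its build and the GPUs it detects:

import tensorflow as tf

print(tf.__version__)                                        # 2.1.0 in my case
print(tf.test.is_built_with_cuda())                          # whether this build was compiled against CUDA
print(tf.config.experimental.list_physical_devices('GPU'))   # GPUs visible to TensorFlow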

When I run the following code:

import os
import sys
import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.layers import Input,Dropout,BatchNormalization,Activation,Add,Flatten,Dense,Reshape
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, LeakyReLU
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img  # , save_img

# Training inputs (x) and targets (y) used in model.fit below
tr = np.load("midshot.npy")
ti = np.load("ti.npy")
def down_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(x)
    c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(c)
    p = keras.layers.MaxPool2D((2, 2), (2, 2))(c)
    return c, p

def up_block(x, skip, filters, kernel_size=(3, 3), padding="same", strides=1):
    us = keras.layers.UpSampling2D((2, 2))(x)
    concat = keras.layers.Concatenate()([us, skip])
    c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(concat)
    c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(c)
    return c

def bottleneck(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(x)
    c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(c)
    return c


def UNet():
    f = [16, 32, 64, 128, 256]
    inputs = keras.layers.Input((502, 200, 1))
    #Matching Dimensions
    m1 = keras.layers.Conv2D(1, (3, 1), padding = "same", strides = 1)(inputs)
    m1 = keras.layers.MaxPool2D((2, 1), (2, 1))(m1)
    m2 = keras.layers.Conv2D(1, (5, 1), padding = "same", strides = 1)(m1)
    m2 = keras.layers.MaxPool2D((2, 2), (2, 2))(m2)
    m3 = Conv2DTranspose(1, (2, 10), strides=(1, 1), padding="valid")(m2)
    m4 = Conv2DTranspose(1, (2, 10), strides=(1, 1), padding="valid")(m3)
    m5 = Conv2DTranspose(1, (2, 11), strides=(1, 1), padding="valid")(m4)
    p0 = m5
    c1, p1 = down_block(p0, f[0]) #128 -> 64
    c2, p2 = down_block(p1, f[1]) #64 -> 32
    c3, p3 = down_block(p2, f[2]) #32 -> 16
    c4, p4 = down_block(p3, f[3]) #16->8
    bn = bottleneck(p4, f[4])
    u1 = up_block(bn, c4, f[3]) #8 -> 16
    u2 = up_block(u1, c3, f[2]) #16 -> 32
    u3 = up_block(u2, c2, f[1]) #32 -> 64
    u4 = up_block(u3, c1, f[0]) #64 -> 128
    u5 = Conv2DTranspose(8, (7, 7), strides=(2, 2), padding="valid")(u4)
    u6 = Conv2DTranspose(4, (7, 7), strides=(1, 1), padding="valid")(u5)
    u7 = Conv2DTranspose(2, (34, 34), strides=(1, 1), padding="valid")(u6)
    outputs = keras.layers.Conv2D(1, (1, 1), padding="same", activation="relu")(u7)
    model = keras.models.Model(inputs, outputs)
    return model

model = UNet()
model.compile(optimizer="adam", loss="mean_squared_error", metrics=['mse'])
model.summary()

#Fitting
history = model.fit(x=tr, y=ti, batch_size=32, epochs=50, verbose=1, callbacks=None, 
          validation_split=0.1, validation_data=None, shuffle=True, class_weight=None, 
          sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None)

After running model.fit, the following error appears:

Train on 4500 samples, validate on 500 samples
Epoch 1/50
  32/4500 [..............................] - ETA: 2:47

---------------------------------------------------------------------------
UnknownError                              Traceback (most recent call last)
<ipython-input-22-607fd43b87e4> in <module>
      5 history = model.fit(x=tr, y=ti, batch_size=32, epochs=50, verbose=1, callbacks=None, 
      6           validation_split=0.1, validation_data=None, shuffle=True, class_weight=None,
----> 7           sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None)

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    817         max_queue_size=max_queue_size,
    818         workers=workers,
--> 819         use_multiprocessing=use_multiprocessing)
    820 
    821   def evaluate(self,

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    340                 mode=ModeKeys.TRAIN,
    341                 training_context=training_context,
--> 342                 total_epochs=epochs)
    343             cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
    344 

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
    126         step=step, mode=mode, size=current_batch_size) as batch_logs:
    127       try:
--> 128         batch_outs = execution_function(iterator)
    129       except (StopIteration, errors.OutOfRangeError):
    130         # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in execution_function(input_fn)
     96     # `numpy` translates Tensors to values in Eager mode.
     97     return nest.map_structure(_non_none_constant_value,
---> 98                               distributed_function(input_fn))
     99 
    100   return execution_function

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
    566         xla_context.Exit()
    567     else:
--> 568       result = self._call(*args, **kwds)
    569 
    570     if tracing_count == self._get_tracing_count():

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
    630         # Lifting succeeded, so variables are initialized and we can run the
    631         # stateless function.
--> 632         return self._stateless_fn(*args, **kwds)
    633     else:
    634       canon_args, canon_kwds = \

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in __call__(self, *args, **kwargs)
   2361     with self._lock:
   2362       graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2363     return graph_function._filtered_call(args, kwargs)  # pylint: disable=protected-access
   2364 
   2365   @property

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _filtered_call(self, args, kwargs)
   1609          if isinstance(t, (ops.Tensor,
   1610                            resource_variable_ops.BaseResourceVariable))),
-> 1611         self.captured_inputs)
   1612 
   1613   def _call_flat(self, args, captured_inputs, cancellation_manager=None):

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
   1690       # No tape is watching; skip to running the function.
   1691       return self._build_call_outputs(self._inference_function.call(
-> 1692           ctx, args, cancellation_manager=cancellation_manager))
   1693     forward_backward = self._select_forward_and_backward_functions(
   1694         args,

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in call(self, ctx, args, cancellation_manager)
    543               inputs=args,
    544               attrs=("executor_type", executor_type, "config_proto", config),
--> 545               ctx=ctx)
    546         else:
    547           outputs = execute.execute_with_cancellation(

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/tensorflow_core/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     65     else:
     66       message = e.message
---> 67     six.raise_from(core._status_to_exception(e.code, message), None)
     68   except TypeError as e:
     69     keras_symbolic_tensors = [

~/anaconda3/envs/tf_gpu/lib/python3.7/site-packages/six.py in raise_from(value, from_value)

UnknownError:  Failed to get convolution algorithm. This is probably because cuDNN failed to initialize, so try looking to see if a warning log message was printed above.
     [[node model_1/conv2d_21/Conv2D (defined at <ipython-input-22-607fd43b87e4>:7) ]] [Op:__inference_distributed_function_7357]

Function call stack:
distributed_function

I have tried several things, such as downgrading the TensorFlow version or the CUDA version, but nothing has worked. I do not understand why this is happening and hope someone can help me.
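
For reference, one workaround that is often suggested for this exact error message is enabling GPU memory growth before the model is built; below is a minimal sketch of that setting (I have not been able to confirm whether it is the right fix in my case):

import tensorflow as tf

# Commonly suggested workaround for "Failed to get convolution algorithm":
# let TensorFlow allocate GPU memory incrementally instead of reserving it all up front.
# This must run before any other TensorFlow code initializes the GPU.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)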

Thank you.
