
I want to train a CNN with Early Stopping and would like to use the f1-metric as the stopping criterion. When I compile the CNN model, I get a TypeError as the error message. I am still using Tensorflow 1.4 and want to avoid upgrading to 2.0, because I remember that my older code no longer worked with it.

The error message is as follows:


TypeError                                 Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in _num_samples(x)
158     try:
--> 159         return len(x)
160     except TypeError:

14 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in __len__(self)
740                     "Please call `x.shape` rather than `len(x)` for "
--> 741                     "shape information.".format(self.name))
742 

TypeError: len is not well defined for symbolic Tensors. (dense_16_target:0) Please call `x.shape` rather than `len(x)` for shape information.

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)
<ipython-input-44-cd3da16e057c> in <module>()
----> 1 model = model_cnn(False,False, False,True,6, 0.2,  0.5)
  2 X_train, X_val,  y_train, y_val =  split_data(X_train, y_train,1)
  3 cnn, ep = train_model_es(model, X_train, y_train, X_val, y_val, X_test, y_test, 50, 500,1)

<ipython-input-42-d275d9c69c03> in model_cnn(spat, extra_pool, avg_pool, cw, numb_conv, drop_conv, drop_dense)
 36   if cw == True:
 37     print("sparse categorical crossentropy")
---> 38     model.compile(loss="sparse_categorical_crossentropy", optimizer=Adam(), metrics=['accuracy', f1_metric])
 39     #model.compile(loss="sparse_categorical_crossentropy", optimizer=Adam(), metrics=['accuracy'])
 40     print("nothing")

/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, **kwargs)
452                 output_metrics = nested_metrics[i]
453                 output_weighted_metrics = nested_weighted_metrics[i]
--> 454                 handle_metrics(output_metrics)
455                 handle_metrics(output_weighted_metrics, weights=weights)
456 

/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in handle_metrics(metrics, weights)
421                     metric_result = weighted_metric_fn(y_true, y_pred,
422                                                        weights=weights,
--> 423                                                        mask=masks[i])
424 
425                 # Append to self.metrics_names, self.metric_tensors,

/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in weighted(y_true, y_pred, weights, mask)
426         """
427         # score_array has ndim >= 2
--> 428         score_array = fn(y_true, y_pred)
429         if mask is not None:
430             # Cast the mask to floatX to avoid float64 upcasting in Theano

<ipython-input-9-b21dc3bd89a6> in f1_metric(y_test, y_pred)
  1 def f1_metric(y_test, y_pred):
----> 2   f1 = f1_score(y_test, y_pred, average='macro')
  3   return f1

/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in f1_score(y_true, y_pred, labels, pos_label, average, sample_weight, zero_division)
   1097                        pos_label=pos_label, average=average,
   1098                        sample_weight=sample_weight,
-> 1099                        zero_division=zero_division)
   1100 
   1101 

/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in fbeta_score(y_true, y_pred, beta, labels, pos_label, average, sample_weight, zero_division)
   1224                                                  warn_for=('f-score',),
   1225                                                  sample_weight=sample_weight,
-> 1226                                                  zero_division=zero_division)
   1227     return f
   1228 

/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in precision_recall_fscore_support(y_true, y_pred, beta, labels, pos_label, average, warn_for, sample_weight, zero_division)
   1482         raise ValueError("beta should be >=0 in the F-beta score")
   1483     labels = _check_set_wise_labels(y_true, y_pred, average, labels,
-> 1484                                     pos_label)
   1485 
   1486     # Calculate tp_sum, pred_sum, true_sum ###

/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
   1299                          str(average_options))
   1300 
-> 1301     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
   1302     present_labels = unique_labels(y_true, y_pred)
   1303     if average == 'binary':

/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in _check_targets(y_true, y_pred)
 78     y_pred : array or indicator matrix
 79     """
---> 80     check_consistent_length(y_true, y_pred)
 81     type_true = type_of_target(y_true)
 82     type_pred = type_of_target(y_pred)

/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
206     """
207 
--> 208     lengths = [_num_samples(X) for X in arrays if X is not None]
209     uniques = np.unique(lengths)
210     if len(uniques) > 1:

/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in <listcomp>(.0)
206     """
207 
--> 208     lengths = [_num_samples(X) for X in arrays if X is not None]
209     uniques = np.unique(lengths)
210     if len(uniques) > 1:

/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in _num_samples(x)
159         return len(x)
160     except TypeError:
--> 161         raise TypeError(message)
162 
163 

TypeError: Expected sequence or array-like, got <class 'tensorflow.python.framework.ops.Tensor'>

Here is the relevant code:

def f1_metric(y_test, y_pred):
   f1 = f1_score(y_test, y_pred, average='macro')
   return f1




def train_model_es(model, X, y, X_val, y_val, X_test, y_test):
   es = EarlyStopping(monitor='f1_metric', mode='max', patience=20, restore_best_weights=True)
   y = np.argmax(y, axis=1)
   y_val = np.argmax(y_val, axis=1)
   y_test = np.argmax(y_test, axis=1)
   class_weights = class_weight.compute_class_weight('balanced', np.unique(y), y)
   class_weights = dict(enumerate(class_weights))
   history = model.fit(X, y, class_weight=class_weights, batch_size=32,epochs=20, verbose=1, 
                  validation_data=(X_val, y_val), callbacks=[es])



def model_cnn():
  model = Sequential()
  model.add(Conv2D(32, kernel_size=(3,3), input_shape=(28,28,1),padding='same'))
  model.add(BatchNormalization())
  model.add(ELU())
  model.add(Conv2D(32, kernel_size=(3,3), padding='same'))
  model.add(BatchNormalization())
  model.add(ELU())
  model.add(MaxPooling2D(pool_size=(2,2)))
  model.add(Dropout(0.2))
  model.add(Flatten())
  model.add(Dense(256))
  model.add(BatchNormalization())
  model.add(ELU())
  model.add(Dropout(0.5))
  model.add(Dense(10, activation='softmax'))
  model.compile(loss="sparse_categorical_crossentroy",optimizer=Adam(),metrics=["accuracy",f1_metric])
  return model

Does anyone have a tip on how to fix this error message?

Many thanks for every tip.


1 Answer


As the error message indicates:

Error 1) You are calling len() on a symbolic tensor, which is not supported for symbolic tensors. You can find the difference between variable tensors and symbolic tensors here.
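
For illustration, a minimal TF 1.x sketch of the difference (not from the original answer):

import tensorflow as tf

# A placeholder is a symbolic tensor: it carries shape information, but has no length.
x = tf.placeholder(tf.float32, shape=(None, 10))
print(x.shape)      # prints (?, 10) -- shape information is always available
# print(len(x))     # raises: TypeError: len is not well defined for symbolic Tensors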

Error 2) You are feeding tensors into an operation that expects arrays as input. Could you convert y_true and y_pred from tensors to arrays and use those for f1_score and the other operations.

Example - converting a tensor to an array

%tensorflow_version 1.x
import tensorflow as tf
import numpy as np
print(tf.__version__)

x = tf.constant([1,2,3,4,5,6])
print("Type of x:",x)

with tf.Session() as sess:
  y = np.array(x.eval())
  print("Type of y:",y.shape,y)

Output -

1.15.2
Type of x: Tensor("Const_24:0", shape=(6,), dtype=int32)
Type of y: (6,) [1 2 3 4 5 6]
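
As a hedged sketch (not part of the original answer), one way to carry this tensor-to-array idea into the questioner's f1_metric under TF 1.x Keras is to wrap the sklearn call in tf.py_func; the label handling below is an assumption based on the question's model (sparse integer targets, softmax outputs):

import tensorflow as tf
import numpy as np
from sklearn.metrics import f1_score

def f1_metric(y_true, y_pred):
    # tf.py_func evaluates the tensors to NumPy arrays at runtime and feeds
    # them to a plain Python function, returning the result as a tensor.
    def _macro_f1(y_true_np, y_pred_np):
        labels = y_true_np.astype(np.int64).ravel()    # sparse integer targets
        preds = np.argmax(y_pred_np, axis=-1).ravel()  # class index from softmax output
        return np.float32(f1_score(labels, preds, average='macro'))
    return tf.py_func(_macro_f1, [y_true, y_pred], tf.float32)

Note that when validation_data is passed to fit, Keras reports this metric as val_f1_metric, which is usually what an EarlyStopping callback with mode='max' would monitor.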