1

我的代码在 Epoch 5 之前运行良好,我看到此警告消息 ( WARNING:tensorflow:Early stopping conditioned on metric `val_prc` which is not available. Available metrics are: loss,tp,fp,tn,fn,accuracy,precision,recall,auc,prc WARNING:tensorflow:Can save best model only with val_prc available, skipping.) 后跟错误

InvalidArgumentError:  assertion failed: [predictions must be >= 0] [Condition x >= y did not hold element-wise:] [x (model/output_layer/Sigmoid:0) = ] [[-nan][-nan][-nan]...] [y (metrics/tp/Cast_2/x:0) = ] [0]
     [[{{node metrics/tp/assert_greater_equal/Assert/AssertGuard/else/_270/Assert}}]] [Op:__inference_distributed_function_19774]

Function call stack:
distributed_function

这就是我的初始代码的样子。

def make_model(output_bias=None):
    #first input layer
    n_features=1
    vocab_size = max(mapping.values())  + 1
    click = Input(shape=(max_length_session,),name="input_event")
    embed_layer = Embedding(input_dim=vocab_size, output_dim=8, input_length=max_length_session,mask_zero=True,name="embed_layer")(click)
    layer11 = LSTM(128, activation='tanh', return_sequences=True,name="embed_layer1")(embed_layer)
    layer11_dropout = Dropout(0.1, name="embed_layer_drop1")(layer11)
    layer12 = LSTM(64, activation='tanh', return_sequences=False, name="embed_layer2")(layer11_dropout)
    #second input layer##########################
    duration = Input(shape=(max_length_session, n_features), name="input_time")
    layer21 = LSTM(128, activation='tanh', return_sequences=True, name="time_layer1")(duration)
    layer21_dropout = Dropout(0.1, name="dropout_time")(layer21)
    layer22 = LSTM(64,activation="relu", return_sequences=False, name="time_layer2")(layer21_dropout)
    #############third layer##############
    feats = Input(shape=(n_tran_feats,), name="feat_layer")
    layer31 = Embedding(input_dim=vocab_size, output_dim=4, input_length=1,name="embed_layer_feature")(feats)
    layer31 = Flatten()(layer31)
    ############################################
    layer_concat = concatenate([layer12,layer22,layer31])
    ###########################################
    layer_dense1 = Dense(64,activation="relu",name="concat_dense1")(layer_concat)
    if output_bias is not None:
        layer_dense2 = Dense(1,activation="sigmoid",bias_initializer=tf.keras.initializers.Constant(output_bias),
                             name="output_layer")(layer_dense1)
    else:
        layer_dense2 = Dense(1,activation="sigmoid",name="output_layer")(layer_dense1)     
    ####################################
    model = Model(inputs=[click,duration,feats], outputs=layer_dense2)
    ##########################
    METRICS = [
      tf.keras.metrics.TruePositives(name='tp'),
      tf.keras.metrics.FalsePositives(name='fp'),
      tf.keras.metrics.TrueNegatives(name='tn'),
      tf.keras.metrics.FalseNegatives(name='fn'), 
      tf.keras.metrics.BinaryAccuracy(name='accuracy'),
      tf.keras.metrics.Precision(name='precision'),
      tf.keras.metrics.Recall(name='recall'),
      tf.keras.metrics.AUC(name='auc'),
      tf.keras.metrics.AUC(name='prc', curve='PR'), # precision-recall curve
    ]
   
    ################################
    adam = tf.keras.optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False, clipnorm=1.)
    model.compile(loss="binary_crossentropy", optimizer=adam, metrics=METRICS)
    return model

total = len(y_train)
pos= (y_train==1).sum()
neg=(y_train==0).sum()
initial_bias = np.log([pos/neg])

weighted_model = make_model(output_bias=initial_bias)
callback_early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_prc',mode="max",verbose=1, patience=5,restore_best_weights=True)
mc = tf.keras.callbacks.ModelCheckpoint('best_model_lstm_weighted_v2.h5', monitor='val_prc', mode='max', save_best_only=True,save_freq='epoch')
baseline_history = weighted_model.fit(x_train,y_train,
                           validation_data=(x_valid, y_valid),
                           batch_size=32, 
                           epochs=100,verbose=1,steps_per_epoch=1700,validation_steps=190, 
                           callbacks=[callback_early_stop,mc],
                           shuffle=True,
                  class_weight=class_weights)```

我看过这个问题。它不适用于我的情况,直到一个月前,我的代码一直运行良好,没有任何错误或警告,我没有更改代码和数据中的任何内容。我正在使用张量流版本“2.1.3”

4

0 回答 0