I really need your help. I am trying to train EfficientNet, but I am getting this error message:

ValueError: `x` (images tensor) and `y` (labels) should have the same length. Found: x.shape = (2205, 240, 240, 3), y.shape = (552, 240, 240, 3)

Here is my code:

from sklearn.model_selection import train_test_split

# data.shape is (2757, 240, 240, 3)
# labels.shape is (2757, 2)
train_data, train_labels, test_data, test_labels = train_test_split(data, labels, test_size=0.2,
                                                                    stratify=labels, random_state=42)
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# construct the training image generator for data augmentation
datagen = ImageDataGenerator(rotation_range=20,
                            zoom_range=0.15,
                            width_shift_range=0.2,
                            height_shift_range=0.2,
                            shear_range=0.15,
                            horizontal_flip=True,
                            fill_mode='nearest')
datagen.fit(train_data)
import tensorflow as tf
from tensorflow.keras.applications import EfficientNetB1
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import GlorotUniform

seed = 42
BS = 32
INIT_lr = 0.0001
num_epoch = 30

baseModel = EfficientNetB1(weights='imagenet', #load pre-trained weights on imagenet
                          include_top=False, #making sure the top layer is left off
                          input_tensor=Input(shape=(240,240,3)))

#construct the head of the model that will be placed on top of the base model
headModel = baseModel.output
headModel = GlobalAveragePooling2D()(headModel)
headModel = BatchNormalization()(headModel)
headModel = Dense(1280, activation='relu', kernel_initializer=GlorotUniform(seed),
                  bias_initializer='zeros')(headModel)
headModel = BatchNormalization()(headModel)
headModel = Dense(2, activation='softmax', kernel_initializer='random_uniform', 
                 bias_initializer='zeros')(headModel)

#place the head model on top of the base model
model = Model(inputs=baseModel.input, outputs=headModel)

# freeze the base model so only the new head is trained
for layer in baseModel.layers:
    layer.trainable = False

print('Compiling the model'+'...'*5)

opt = tf.keras.optimizers.Adam(INIT_lr) #optimizer
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

print('Training the model'+'...'*5)

History = model.fit(datagen.flow(train_data, train_labels, batch_size=BS),
                    validation_data=(test_data, test_labels),
                    steps_per_epoch=len(train_data) // BS,  # batches per epoch, not samples
                    epochs=num_epoch)
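One thing I noticed while re-reading the error: 2205 + 552 = 2757, the size of my full dataset, and y has an image-like shape rather than a label shape. So I wonder whether I am unpacking the four values returned by train_test_split in the wrong order; as far as I know, scikit-learn returns them grouped per input array, as X_train, X_test, y_train, y_test. A minimal sketch of that return order with dummy arrays (the tiny image size and toy labels are just for illustration):

import numpy as np
from sklearn.model_selection import train_test_split

# toy stand-ins: same sample count as my data, tiny images to keep it light
data = np.zeros((2757, 2, 2, 3), dtype='float32')   # real images are 240x240x3
labels = np.zeros((2757, 2), dtype='float32')
labels[::2, 0] = 1   # even rows: class A, one-hot
labels[1::2, 1] = 1  # odd rows: class B, one-hot

# train_test_split returns (train, test) per input array, in input order:
# data -> train_data, test_data; labels -> train_labels, test_labels
train_data, test_data, train_labels, test_labels = train_test_split(
    data, labels, test_size=0.2, stratify=labels, random_state=42)

print(train_data.shape)    # (2205, 2, 2, 3)
print(train_labels.shape)  # (2205, 2)
print(test_data.shape)     # (552, 2, 2, 3)
print(test_labels.shape)   # (552, 2)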