
I have been trying to train a VGG16 network in Keras on a dataset of about 2,000 images. The problem is that after every epoch I keep getting a very low accuracy of around 2%, and it just fluctuates around that number. But when I train a simple LeNet on the same data, it reaches a high accuracy of around 99%. I don't know what the problem is. Is there an inherent flaw in my code? Can more depth sometimes hurt accuracy? Both pieces of code are attached below. Sorry, I don't have screenshots of the training runs.
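For reference, here is a quick back-of-the-envelope check of the chance-level accuracy for my 30 classes (assuming the classes are roughly balanced):

# Chance-level accuracy for a 30-class problem with roughly balanced classes.
num_classes = 30
print(1.0 / num_classes)  # ~0.033, so an accuracy stuck around 2% is close to random guessing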

VGG16 code:

from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import Sequential
classifier = Sequential()

# VGG16-style stack of conv blocks (Keras 2 syntax: kernel_size passed as a tuple)
classifier.add(Conv2D(64, (3, 3), input_shape = (224, 224, 3), activation = 'relu'))
classifier.add(Conv2D(64, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(128, (3, 3), activation = 'relu'))
classifier.add(Conv2D(128, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(256, (3, 3), activation = 'relu'))
classifier.add(Conv2D(256, (3, 3), activation = 'relu'))
classifier.add(Conv2D(256, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(512, (3, 3), activation = 'relu'))
classifier.add(Conv2D(512, (3, 3), activation = 'relu'))
classifier.add(Conv2D(512, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(512, (3, 3), activation = 'relu'))
classifier.add(Conv2D(512, (3, 3), activation = 'relu'))
classifier.add(Conv2D(512, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))

# Fully connected classifier head with 30 output classes
classifier.add(Flatten())
classifier.add(Dense(4096, activation = 'relu'))
classifier.add(Dense(4096, activation = 'relu'))
classifier.add(Dense(1000, activation = 'relu'))
classifier.add(Dense(30, activation = 'softmax'))

classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.25)

train_generator = train_datagen.flow_from_directory(
    '/home/aslt/signData/top30Classes',
    target_size=(224, 224),
    batch_size= 15,
    class_mode='categorical',
    subset='training')

validation_generator = train_datagen.flow_from_directory(
    '/home/aslt/signData/top30Classes',
    target_size=(224, 224),
    batch_size= 15,
    class_mode='categorical',
    subset='validation')

filePath="/home/aslt/modelFiles/callbacksVGGNetWeights.hdf5"
checkpoint = ModelCheckpoint(filePath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
classifier.fit_generator(
    train_generator,
    steps_per_epoch = train_generator.samples,
    validation_data = validation_generator, 
    validation_steps = validation_generator.samples,
    epochs = 10,
    callbacks = [checkpoint])



classifier.save('aslModelVGG.h5')
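I'm not sure whether it matters for the accuracy problem, but I realise steps_per_epoch and validation_steps are usually the number of samples divided by the batch size rather than the raw sample counts. A minimal sketch of how I could compute them from the generators defined above:

import math

# Steps per epoch are usually ceil(samples / batch_size); 15 matches the batch_size used above.
batch_size = 15
steps_per_epoch = math.ceil(train_generator.samples / batch_size)
validation_steps = math.ceil(validation_generator.samples / batch_size)
print(steps_per_epoch, validation_steps)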

LeNet code:

import keras
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import Sequential
classifier = Sequential()

# Single conv + pool block followed by a small dense head (Keras 2 syntax)
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(1000, activation = 'relu'))
classifier.add(Dense(30, activation = 'softmax'))

classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.25)

train_generator = train_datagen.flow_from_directory(
    '/home/aslt/signData/top30Classes',
    target_size=(64, 64),
    batch_size= 15,
    class_mode='categorical',
    subset='training')

validation_generator = train_datagen.flow_from_directory(
    '/home/aslt/signData/top30Classes',
    target_size=(64, 64),
    batch_size= 15,
    class_mode='categorical',
    subset='validation')
filepath="/home/aslt/modelFiles"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
classifier.fit_generator(
    train_generator,
    steps_per_epoch = train_generator.samples,
    validation_data = validation_generator, 
    validation_steps = validation_generator.samples,
    epochs = 10,
    callbacks = [checkpoint])



classifier.save('aslModelLeNet.h5')
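For comparison, this is how I can check how much larger the VGG-style model is than the LeNet-style one (run on whichever classifier was just built; count_params() and summary() are standard Keras Model methods):

# Total number of parameters in the current model, plus a per-layer breakdown.
print(classifier.count_params())
classifier.summary()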

Also, to be clear, I'm using a dataset from a sign language dictionary. I split the videos into images. The goal is to be able to recognize American Sign Language words through image classification.
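In case it is relevant, this is roughly how I turn the videos into training images (a minimal sketch using OpenCV; the paths and the every-5th-frame stride are placeholders, not my exact setup):

import os
import cv2

def extract_frames(video_path, out_dir, every_n=5):
    # Save every n-th frame of the video as a JPEG in out_dir.
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    idx = saved = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if idx % every_n == 0:
            cv2.imwrite(os.path.join(out_dir, 'frame_%05d.jpg' % saved), frame)
            saved += 1
        idx += 1
    cap.release()
    return saved

# Hypothetical usage:
# extract_frames('sign_word.mp4', '/home/aslt/signData/top30Classes/word', every_n=5)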
