
I am working on a binary classification problem with 440 images in total. I am training a CNN with the `train_on_batch` function on each batch. I know `train_on_batch` is meant for large datasets, but I am using it on a small dataset for testing. I am training my model (a VGG16 built from scratch; see `vgg16_keras.py` below) for 5 epochs with a batch size of 32, and in every epoch I get exactly the same batch accuracies and batch losses. What could be the reason for this?

Dataset structure:

dataset
   with_mask
      220 images
   without_mask
      220 images

File: train_on_batch.py

# importing libraries
import os
import cv2
from glob import glob
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import numpy as np
from vgg16_keras import VGG16
import warnings
warnings.filterwarnings('ignore')

def load_data():
    # initialize the data and labels
    data = []
    labels = []
    
    images_list = glob("E:/ai/Mask Detection/dataset/*/*.PNG")
    
    # loop over the input images
    for imagePath in images_list:
        image = cv2.imread(imagePath)
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.resize(image, (224, 224))
        image = img_to_array(image)
        data.append(image)
        
        label = imagePath.split(os.path.sep)[-2]
        labels.append(label)
    print("Data and Labels are ready to use")
    
    data = np.array(data, dtype = "float") / 255.0
    labels = np.array(labels)

    return data, labels   

def optimizer():
    return Adam(lr = 0.001)

def create_cnn():
    model = VGG16.build(224, 224, 3, 1)
    model.compile(loss = "binary_crossentropy", optimizer = optimizer(), metrics = ["accuracy"])
    return model

def get_batch(batch_size, trainX, trainY):
    size = len(trainX)

    n_batch = size // batch_size
    i = 0

    while(i < n_batch):
        batchY = trainY[(i * n_batch):(i * n_batch + batch_size)]
        batchX = trainX[(i * n_batch):(i * n_batch + batch_size)]
        batchX = batchX.reshape(batch_size, 224, 224, 3)
        i += 1
        yield batchX, batchY

def training(epoch = 5, batch_size = 32):
    data, labels = load_data()
    
    # partition the data into training and testing with 80% data to training and 20% to testing 
    (trainX, testX, trainY, testY) = train_test_split(data, labels, test_size = 0.2)

    # convert the labels from integers to vectors
    lb = LabelBinarizer()
    trainY = lb.fit_transform(trainY)
    testY = lb.fit_transform(testY)

    model = create_cnn()
    
    n_epochs = epoch
    for epoch in range(1, n_epochs+1):
        print("=" * 100)
        print("Epoch:{}/{}".format(epoch, n_epochs))
        train_acc = []
        for batchX, batchY in get_batch(batch_size, trainX, trainY):
            loss, acc = model.train_on_batch(batchX, batchY)
            print("batch accuracy: {}, batch loss: {}".format(acc, loss))
        train_acc.append(acc)
    print("Train accuracy", np.mean(train_acc)) 

training(epoch = 5, batch_size=32)

File: vgg16_keras.py

# importing libraries
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dense, Flatten, Dropout
from tensorflow.keras import backend as K

class VGG16:    
    @staticmethod
    def build(width, height, depth, classes):

        # initialize the model along with input shape to be "channels last" and the 
        # channels dimensions itself
        model = Sequential()
        input_shape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            input_shape = (depth, height, width)

        # Block 1:  CONV => RELU => CONV => RELU => POOL layer set
        model.add(Conv2D(64, (3, 3), input_shape=input_shape, padding='same'))
        model.add(Activation("relu")) 
        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size = (2,2), strides = (2, 2)))

        # Block 2: CONV => RELU => CONV => RELU => POOL layer set

        model.add(Conv2D(128, (3, 3), padding='same'))
        model.add(Activation("relu")) 
        model.add(Conv2D(128, (3, 3), padding='same'))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size = (2,2), strides = (2, 2)))

        # Block 3: CONV => RELU => CONV => RELU => CONV => RELU => POOL layer set

        model.add(Conv2D(256, (3, 3), padding='same'))
        model.add(Activation("relu")) 
        model.add(Conv2D(256, (3, 3), padding='same'))
        model.add(Activation("relu"))
        model.add(Conv2D(256, (3, 3), padding='same'))
        model.add(Activation("relu")) 
        model.add(MaxPooling2D(pool_size = (2,2), strides = (2, 2)))

        # Block 4: CONV => RELU => CONV => RELU => CONV => RELU => POOL layer set
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(Activation("relu")) 
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(Activation("relu"))
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(Activation("relu")) 
        model.add(MaxPooling2D(pool_size = (2,2), strides = (2, 2)))

        # Block 5: CONV => RELU => CONV => RELU => CONV => RELU => POOL layer set

        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(Activation("relu")) 
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(Activation("relu"))
        model.add(Conv2D(512, (3, 3), padding='same'))
        model.add(Activation("relu")) 
        model.add(MaxPooling2D(pool_size = (2,2), strides = (2, 2)))

        # Block 6: first set of FC => RELU layers

        model.add(Flatten())
        model.add(Dense(4096))
        model.add(Activation("relu"))
        model.add(Dropout(0.5))

        # Block 7: second set of FC => RELU layers

        model.add(Dense(4096))
        model.add(Activation("relu"))
        model.add(Dropout(0.5))

        # Softmax classifier

        model.add(Dense(classes))
        model.add(Activation("softmax"))
        
        return model

Output:

Epoch:1/5
batch accuracy: 0.5625, batch loss: 6.708292007446289
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.3125, batch loss: 10.54160213470459
batch accuracy: 0.34375, batch loss: 10.06243896484375
batch accuracy: 0.4375, batch loss: 8.624947547912598
batch accuracy: 0.53125, batch loss: 7.187456130981445
batch accuracy: 0.625, batch loss: 5.749964714050293
batch accuracy: 0.625, batch loss: 5.749964714050293
====================================================================================================
Epoch:2/5
batch accuracy: 0.5625, batch loss: 6.708292007446289
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.3125, batch loss: 10.54160213470459
batch accuracy: 0.34375, batch loss: 10.06243896484375
batch accuracy: 0.4375, batch loss: 8.624947547912598
batch accuracy: 0.53125, batch loss: 7.187456130981445
batch accuracy: 0.625, batch loss: 5.749964714050293
batch accuracy: 0.625, batch loss: 5.749964714050293
====================================================================================================
Epoch:3/5
batch accuracy: 0.5625, batch loss: 6.708292007446289
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.3125, batch loss: 10.54160213470459
batch accuracy: 0.34375, batch loss: 10.06243896484375
batch accuracy: 0.4375, batch loss: 8.624947547912598
batch accuracy: 0.53125, batch loss: 7.187456130981445
batch accuracy: 0.625, batch loss: 5.749964714050293
batch accuracy: 0.625, batch loss: 5.749964714050293
====================================================================================================
Epoch:4/5
batch accuracy: 0.5625, batch loss: 6.708292007446289
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.3125, batch loss: 10.54160213470459
batch accuracy: 0.34375, batch loss: 10.06243896484375
batch accuracy: 0.4375, batch loss: 8.624947547912598
batch accuracy: 0.53125, batch loss: 7.187456130981445
batch accuracy: 0.625, batch loss: 5.749964714050293
batch accuracy: 0.625, batch loss: 5.749964714050293
====================================================================================================
Epoch:5/5
batch accuracy: 0.5625, batch loss: 6.708292007446289
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.40625, batch loss: 9.104110717773438
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.375, batch loss: 9.583274841308594
batch accuracy: 0.3125, batch loss: 10.54160213470459
batch accuracy: 0.34375, batch loss: 10.06243896484375
batch accuracy: 0.4375, batch loss: 8.624947547912598
batch accuracy: 0.53125, batch loss: 7.187456130981445
batch accuracy: 0.625, batch loss: 5.749964714050293
batch accuracy: 0.625, batch loss: 5.749964714050293
Train accuracy 0.625

Even if I change the learning rate after each epoch, it still gives me the same results. I am using TensorFlow version 1.14.

I need to train a classification model on custom batches using `train_on_batch`. It would help if you could point me to some examples that do the same thing.


1 Answer


I hope it is not too late. I think the problem is that the `train_acc.append(acc)` call is not inside the inner `for` loop, so `train_acc` only ever receives the accuracy of the last batch.
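
For reference, a minimal sketch of the corrected loop. It assumes the rest of the question's training() function (load_data, get_batch, create_cnn) is unchanged; the only modifications are where train_acc.append(acc) is called and where the mean is printed:

# Corrected epoch loop, reusing the variables from the question's
# training() function. train_acc.append(acc) now runs once per batch,
# inside the inner for loop, and the mean accuracy is printed per epoch.
for epoch in range(1, n_epochs + 1):
    print("=" * 100)
    print("Epoch:{}/{}".format(epoch, n_epochs))
    train_acc = []
    for batchX, batchY in get_batch(batch_size, trainX, trainY):
        loss, acc = model.train_on_batch(batchX, batchY)
        print("batch accuracy: {}, batch loss: {}".format(acc, loss))
        train_acc.append(acc)  # record every batch, not just the last one
    print("Train accuracy", np.mean(train_acc))

With this change, np.mean(train_acc) averages over all batches of the epoch instead of a single value.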

answered 2021-05-25 13:37