
I am trying to build a binary CNN classifier on a small dataset (class 0 = 77 images, class 1 = 41 images) and I want to do 5-fold cross-validation. In each fold I use the validation set to save the best model, and all folds share the same model, hyperparameters, and training strategy. Here are my results:

  • fold - test-set accuracy
  • fold 0 - 0.68
  • fold 1 - 0.71
  • fold 2 - 0.91
  • fold 3 - 0.96
  • fold 4 - 0.64
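With only 118 images, each test fold holds roughly 24 cases, so three or four misclassifications already move accuracy by more than 10 points, and a fold that happens to draw an unlucky class mix can look consistently "hard". For reference, here is a minimal sketch of how I could regenerate the per-fold splits stratified by class with a fixed seed (the filename/label arrays are placeholders for my real lists; StratifiedKFold comes from scikit-learn):

import numpy as np
from sklearn.model_selection import StratifiedKFold

# Placeholder data standing in for the real filenames/labels read from CSV.
filenames = np.array(['img_%03d.nii.gz' % i for i in range(118)])
labels = np.array([0] * 77 + [1] * 41)

# shuffle=True with a fixed random_state gives reproducible folds;
# stratification keeps the 77:41 class ratio similar in every fold.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold, (train_idx, val_idx) in enumerate(skf.split(filenames, labels)):
    print('fold %d: train=%d, val=%d, val positives=%d'
          % (fold, len(train_idx), len(val_idx), labels[val_idx].sum()))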

My question: while fine-tuning by changing hyperparameters, I found that fold 2 and fold 3 perform better every time, while fold 0 and fold 4 always perform poorly. What could be causing this, and what should I do?

One possible issue is that the weight initialization is random on every run.
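If random initialization is the cause, fixing the seeds should rule it out. A minimal sketch of what I could call at the top of train() (monai.utils.set_determinism seeds Python's random, NumPy, and PyTorch, and puts cuDNN into deterministic mode):

from monai.utils import set_determinism

# With the same seed, every fold starts from the same weight
# initialization and sees the same augmentation stream.
set_determinism(seed=0)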

Thank you all for your answers.

import os
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data.sampler import WeightedRandomSampler

import monai
from monai.data import NiftiDataset
from monai.transforms import Compose, AddChannel, ScaleIntensity, RandFlip, RandRotate, ToTensor
from monai.data import CSVSaver

from data_process import read_csv, get_sample_weights


def train(train_file, val_file, stage='exp0'):
    '''
    Train one cross-validation fold.
    :param train_file: CSV listing the training image filenames and labels
    :param val_file: CSV listing the validation image filenames and labels
    :param stage: fold tag used in checkpoint filenames, e.g. 'exp0'
    :return: None
    '''

    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    img_src_path = '../samples/T1c_images/'  # root directory of the T1c NIfTI volumes

    img_list_train, label_list_train = read_csv(train_file)
    img_list_val, label_list_val = read_csv(val_file)

   
    img_train = [os.path.join(img_src_path, i) for i in img_list_train]
    labels_train = [int(i) for i in label_list_train]

    img_val = [os.path.join(img_src_path, i) for i in img_list_val]
    labels_val = [int(i) for i in label_list_val]
    print('val images: ', len(img_val))

    # Define transforms
    # train_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((182, 218, 182)),  RandRotate90(), ToTensor()])
    # val_transforms = Compose([ScaleIntensity(), AddChannel(),  Resize((182, 218, 182)), ToTensor()])
    train_transforms = Compose([ScaleIntensity(), AddChannel(),  # add the channel dim first: spatial transforms expect channel-first arrays
                                RandRotate(range_x=45, range_y=45, range_z=45, prob=0.5),  # note: newer MONAI treats range_x/y/z as radians
                                RandFlip(prob=0.5, spatial_axis=1),
                                ToTensor()])  # RandRotate90() would also work if x = y = z
    val_transforms = Compose([ScaleIntensity(), AddChannel(), ToTensor()])

    train_ds = NiftiDataset(image_files=img_train, labels=labels_train, transform=train_transforms, image_only=False)
    train_loader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=2,
                              pin_memory=torch.cuda.is_available())

    # create a validation data loader
    val_ds = NiftiDataset(image_files=img_val, labels=labels_val, transform=val_transforms, image_only=False)
    val_loader = DataLoader(val_ds, batch_size=4, num_workers=2, pin_memory=torch.cuda.is_available())

    # Create DenseNet121, CrossEntropyLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
    model = torch.nn.DataParallel(model)
    loss_function = torch.nn.CrossEntropyLoss(weight=torch.tensor([1.0, 1.2])).to(device)  # upweight the minority class
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)

    # start a typical PyTorch training
    epochs = 50
    val_interval = 1
    best_metric = -1
    best_metric_epoch = -1
    writer = SummaryWriter()
    for epoch in range(epochs):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{epochs}")
        model.train()
        epoch_loss = 0
        step = 0
        t_metric_count = 0
        t_num_correct = 0
        for batch_data in train_loader:
            step += 1
            # print image names
            # print('image name', batch_data[2]['filename_or_obj'])
            inputs = batch_data[0].to(device)
            # print(inputs.shape)
            labels = batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            # train acc
            t_value = torch.eq(outputs.argmax(dim=1), labels)
            t_metric_count += len(t_value)  # samples seen so far this epoch
            t_num_correct += t_value.sum().item()  # correct predictions so far
           
            # print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
        epoch_loss /= step
        t_metric = t_num_correct / t_metric_count
        writer.add_scalar("train_loss", epoch_loss, epoch + 1)
        writer.add_scalar("train_acc", t_metric, epoch + 1)

        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                num_correct = 0.0
                metric_count = 0
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
                    val_outputs = model(val_images)
                    value = torch.eq(val_outputs.argmax(dim=1), val_labels)
                    metric_count += len(value)  # validation samples seen
                    num_correct += value.sum().item()  # correct validation predictions
                metric = num_correct / metric_count
             
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    save_dir = 'checkpoint_07201'
                    os.makedirs(save_dir, exist_ok=True)  # create the folder if it is missing
                    save_path = os.path.join(save_dir, stage + '_' + str(epoch + 1) + '_best_metric_model.pth')
                    torch.save(model.state_dict(), save_path)
                    print("saved new best metric model")
                
                print(
                    "current epoch: {} current accuracy: {:.4f} best val accuracy: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    ))
                print('current train accuracy: {:.4f}, num_correct: {}, num_count: {}'.format(
                    t_metric, t_num_correct, t_metric_count))
                    
                writer.add_scalar("val_accuracy", metric, epoch + 1)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()

if __name__ == "__main__":
    # run all 5 cross-validation folds
    for i in range(5):
        folder = 'exp' + str(i)
        train_path = './data/' + folder + '/train.csv'
        val_path = './data/' + folder + '/val.csv'
        train(train_path, val_path, stage=folder)
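
Side note: WeightedRandomSampler is imported above but never used. Given the 77/41 imbalance, oversampling the minority class might stabilize training across folds. A minimal sketch of wiring it into the training loader, with inverse-frequency weights computed inline instead of via my get_sample_weights helper (labels_train and train_ds are the variables built inside train()):

from collections import Counter

import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler

counts = Counter(labels_train)  # per-fold class counts, e.g. {0: 62, 1: 33}
sample_weights = [1.0 / counts[lbl] for lbl in labels_train]  # rarer class drawn more often
sampler = WeightedRandomSampler(sample_weights, num_samples=len(sample_weights),
                                replacement=True)

# A DataLoader accepts either sampler= or shuffle=True, never both.
train_loader = DataLoader(train_ds, batch_size=4, sampler=sampler,
                          num_workers=2, pin_memory=torch.cuda.is_available())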