
I am trying to implement code in PyTorch, but I get the following error. My Python version is 3.6 and my OS is Ubuntu 16.04 LTS, installed alongside macOS. I am using the torchvision and torch.utils.data packages to load the data. There are 75 validation images for each class.

OSError                                   Traceback (most recent call last)
<ipython-input-4-e0e3a841f698> in <module>()
     62 
 63 # Get a batch of training data
---> 64 inputs, classes = next(iter(dset_loaders['train']))
 65 
 66 # Make a grid from batch

/home/zeinab/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py in __next__(self)
172                 self.reorder_dict[idx] = batch
173                 continue
--> 174             return self._process_next_batch(batch)
175 
176     next = __next__  # Python 2 compatibility

/home/zeinab/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py in _process_next_batch(self, batch)
196         self._put_indices()
197         if isinstance(batch, ExceptionWrapper):
--> 198             raise batch.exc_type(batch.exc_msg)
199         return batch
200 

OSError: Traceback (most recent call last):
  File "/home/zeinab/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 32, in _worker_loop
    samples = collate_fn([dataset[i] for i in batch_indices])
  File "/home/zeinab/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 32, in <listcomp>
    samples = collate_fn([dataset[i] for i in batch_indices])
  File "/home/zeinab/anaconda3/lib/python3.6/site-packages/torchvision-0.1.7-py3.6.egg/torchvision/datasets/folder.py", line 57, in __getitem__
    img = self.loader(os.path.join(self.root, path))
  File "/home/zeinab/anaconda3/lib/python3.6/site-packages/torchvision-0.1.7-py3.6.egg/torchvision/datasets/folder.py", line 38, in default_loader
    return Image.open(path).convert('RGB')
  File "/home/zeinab/anaconda3/lib/python3.6/site-packages/PIL/Image.py", line 2349, in open
    % (filename if filename else fp))
OSError: cannot identify image file 'hymenoptera_data/train/ants/._154124431_65460430f2.jpg'

My code is as follows:

%matplotlib inline
# License: BSD
# Author: Sasank Chilamkurthy

from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import copy
import os
import sys
from PIL import Image

plt.ion()   # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomSizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_dir = 'hymenoptera_data'
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
         for x in ['train', 'val']}
dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=4,
                                               shuffle=True, num_workers=4)
                for x in ['train', 'val']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
dset_classes = dsets['train'].classes

use_gpu = torch.cuda.is_available()
def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))

    # Undo the ImageNet normalization before displaying
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated


# Get a batch of training data
inputs, classes = next(iter(dset_loaders['train']))

# Make a grid from batch
out = torchvision.utils.make_grid(inputs)

imshow(out, title=[dset_classes[x] for x in classes])

1 Answer


My guess is that the images, or the folder containing them, were created on macOS, and you are now reading them with Pillow on Linux. File names that start with "._" (such as '._154124431_65460430f2.jpg' in your traceback) are AppleDouble metadata files that macOS creates alongside the real images; they are not valid images, which is why PIL cannot identify them. Creating the folders (such as 'train/ants') manually and copying only the real images into the new folders solves the problem.
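As an alternative to copying everything by hand, a short cleanup script can delete the stray "._*" files in place. This is only a minimal sketch, assuming the dataset lives in 'hymenoptera_data' as in your code; it deletes files, so review the printed paths before trusting it:

import os

data_dir = 'hymenoptera_data'  # assumed to match the data_dir in the question

# Walk the dataset tree and remove the AppleDouble "._*" metadata files
# that macOS leaves next to the real images
for root, dirs, files in os.walk(data_dir):
    for name in files:
        if name.startswith('._'):
            path = os.path.join(root, name)
            print('removing', path)
            os.remove(path)

Newer torchvision releases also accept an is_valid_file callable in datasets.ImageFolder, which can be used to skip such files at load time, but the torchvision 0.1.7 in your traceback predates that option.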

answered Feb 23, 2021 at 1:41