
I'm trying to implement a DCGAN with image_size = 256 (using PyTorch). I've implemented the Generator and Discriminator as shown below:

import torch.nn as nn

# nz (latent size), ngf (generator feature maps) and nc (image channels) are
# assumed to be defined elsewhere, as in the PyTorch DCGAN tutorial.
class Generator(nn.Module):
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(nz, ngf * 32, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 32),
            nn.ReLU(True),
            # state size. (ngf*32) x 4 x 4
            nn.ConvTranspose2d(ngf * 32, ngf * 16, 4, 2, 1, bias=False),  # padding=1 so the output is 8 x 8
            nn.BatchNorm2d(ngf * 16),
            nn.ReLU(True),
            # state size. (ngf*16) x 8 x 8
            nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 16 x 16
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 32 x 32
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 64 x 64
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 128 x 128
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 256 x 256
        )

    def forward(self, input):
        return self.main(input)
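To double-check the layer arithmetic, I would expect a quick snippet like this to print a 256 x 256 output (nz = 100, ngf = 64, nc = 3 are just placeholder values for the check, not necessarily my actual config):

import torch

# Placeholder hyperparameters for the shape check only (assumed values)
nz, ngf, nc, ngpu = 100, 64, 3, 1

netG = Generator(ngpu)
z = torch.randn(1, nz, 1, 1)   # one latent vector
print(netG(z).shape)           # expected: torch.Size([1, 3, 256, 256])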

# ndf (discriminator feature maps) and nc are likewise assumed to be defined elsewhere.
class Discriminator(nn.Module):
    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 256 x 256
            nn.Conv2d(nc, ndf, 4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 128 x 128
            nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 64 x 64
            nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 32 x 32
            nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 16 x 16
            nn.Conv2d(ndf * 8, ndf * 16, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 16),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*16) x 8 x 8
            nn.Conv2d(ndf * 16, ndf * 32, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(ndf * 32),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*32) x 4 x 4
            nn.Conv2d(ndf * 32, 1, 4, stride=1, padding=0, bias=False),
            nn.Sigmoid()
            # state size. 1 x 1 x 1
        )

    def forward(self, input):
        return self.main(input)
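And similarly for the discriminator, which should collapse a 256 x 256 image down to a single score per sample (again with placeholder values for the check):

import torch

nc, ndf, ngpu = 3, 64, 1          # placeholder values for the shape check
netD = Discriminator(ngpu)
x = torch.randn(1, nc, 256, 256)  # dummy 256 x 256 RGB image, batch of 1
print(netD(x).shape)              # expected: torch.Size([1, 1, 1, 1])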

After reading a bit on GAN forums, I gathered that batch_size has to be quite low, given that I'm using a GTX 1050 Ti with 4 GB of memory (in fact, my batch_size variable is set to 5).
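To narrow down where the memory goes, I've been looking at PyTorch's built-in CUDA memory counters, roughly like this (just a diagnostic sketch; ngpu = 1 is assumed here):

import torch

device = torch.device("cuda:0")
ngpu = 1  # assumed single-GPU setup

netG = Generator(ngpu).to(device)
netD = Discriminator(ngpu).to(device)

# Memory taken by the weights alone; the 256 x 256 activations during a
# forward/backward pass are usually what actually pushes memory over the limit.
print(f"allocated: {torch.cuda.memory_allocated(device) / 1024**2:.1f} MiB")
print(f"peak:      {torch.cuda.max_memory_allocated(device) / 1024**2:.1f} MiB")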

But even so, I'm getting this error:

RuntimeError: CUDA error: out of memory

Can anyone help me?

Update 2: image resizing code

# dataroot, image_size (= 256), batch_size and workers are assumed to be defined earlier.
import torch
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms

# Create the dataset
dataset = dset.ImageFolder(root=dataroot,
                           transform=transforms.Compose([
                               transforms.Resize(image_size),
                               transforms.CenterCrop(image_size),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                           ]))
# Create the dataloader
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=workers)
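A quick sanity check on the loader (just a throwaway snippet, not part of my training code) would be something like:

# Pull one batch from the dataloader defined above
real_batch, _ = next(iter(dataloader))
print(real_batch.shape)                                   # torch.Size([5, 3, 256, 256]) with batch_size = 5
print(real_batch.min().item(), real_batch.max().item())   # roughly within [-1, 1] after Normalize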
