我有一个预定义 nz=10, ngf= 64 的 python 测试代码
def test_Generator_shapes():
nz = 10
netG = Generator(nz, ngf=64, nc=1)
batch_size = 32
noise = torch.randn(batch_size, nz, 1, 1)
out = netG(noise, verbose=True)
assert out.shape == torch.Size([batch_size, 1, 28, 28]), f"Bad shape of out: out.shape={out.shape}"
print('Success')
test_Generator_shapes()
现在我需要重置隐藏层和其他参数才能输出大小为 28x28 的图像,即 -torch.Size([batch_size, 1, 28, 28])
请有人建议我应该在以下代码中进行哪些更改,以便能够生成 28x28 而不是 64x64 目前的图像
class Generator(nn.Module):
def __init__(self, nz=10, ngf=28, nc=1, ndf=28):
"""GAN generator.
Args:
nz: Number of elements in the latent code.
ngf: Base size (number of channels) of the generator layers.
nc: Number of channels in the generated images.
"""
ngf=28
super(Generator, self).__init__()
self.ngpu = 0
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d( ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
# YOUR CODE HERE
#raise NotImplementedError()
def forward(self, z, verbose=False):
"""Generate images by transforming the given noise tensor.
Args:
z of shape (batch_size, nz, 1, 1): Tensor of noise samples. We use the last two singleton dimensions
so that we can feed z to the generator without reshaping.
verbose (bool): Whether to print intermediate shapes (True) or not (False).
Returns:
out of shape (batch_size, nc, 28, 28): Generated images.
"""
# YOUR CODE HERE
x = self.main(z)
print (x.size())
return x
#raise NotImplementedError()