
I am trying to create a contractive autoencoder in PyTorch. I found this thread and tried it out. This is the snippet I wrote based on the mentioned thread:

import datetime
import numpy as np 
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.utils import save_image, make_grid
import torch.nn as nn 
import torch.nn.functional as F 
import torch.optim as optim
import matplotlib.pyplot as plt 
%matplotlib inline

dataset_train = datasets.MNIST(root='MNIST',
                               train=True,
                               transform = transforms.ToTensor(),
                               download=True)
dataset_test  = datasets.MNIST(root='MNIST', 
                               train=False, 
                               transform = transforms.ToTensor(),
                               download=True)
batch_size = 128
num_workers = 2
dataloader_train = torch.utils.data.DataLoader(dataset_train,
                                               batch_size = batch_size,
                                               shuffle=True,
                                               num_workers = num_workers, 
                                               pin_memory=True)

dataloader_test = torch.utils.data.DataLoader(dataset_test,
                                               batch_size = batch_size,
                                               num_workers = num_workers,
                                               pin_memory=True)

def view_images(imgs, labels, rows = 4, cols =11):
    imgs = imgs.detach().cpu().numpy().transpose(0,2,3,1)
    fig = plt.figure(figsize=(8,4))
    for i in range(imgs.shape[0]):
        ax = fig.add_subplot(rows, cols, i+1, xticks=[], yticks=[])
        ax.imshow(imgs[i].squeeze(), cmap='Greys_r')
        ax.set_title(labels[i].item())


# now let's view some 
imgs, labels = next(iter(dataloader_train))
view_images(imgs, labels,13,10)

class Contractive_AutoEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(784, 512)
        self.decoder = nn.Linear(512, 784)

    def forward(self, input):
        # flatten the input
        shape = input.shape
        input = input.view(input.size(0), -1)
        output_e = F.relu(self.encoder(input))
        output = F.sigmoid(self.decoder(output_e))
        output = output.view(*shape)
        return output_e, output

def loss_function(output_e, outputs, imgs, device):
    output_e.backward(torch.ones(output_e.size()).to(device), retain_graph=True)
    criterion = nn.MSELoss()
    assert outputs.shape == imgs.shape ,f'outputs.shape : {outputs.shape} != imgs.shape : {imgs.shape}'
    
    imgs.grad.requires_grad = True 
    loss1 = criterion(outputs, imgs)
    print(imgs.grad)
    loss2 = torch.mean(pow(imgs.grad,2))
    loss = loss1 + loss2 
    return loss 

epochs = 50 
interval = 2000
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Contractive_AutoEncoder().to(device)
optimizer = optim.Adam(model.parameters(), lr =0.001)

for e in range(epochs):
    for i, (imgs, labels) in enumerate(dataloader_train):
        imgs = imgs.to(device)
        labels = labels.to(device)

        outputs_e, outputs = model(imgs)
        loss = loss_function(outputs_e, outputs, imgs,device)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i%interval: 
            print('')

    print(f'epoch/epochs: {e}/{epochs} loss : {loss.item():.4f} ')

For the sake of brevity I just used a single layer for the encoder and the decoder. Obviously it should work regardless of the number of layers in either of them!

But the problem here is that, aside from the fact that I don't know whether this is the correct way of doing this (calculating the gradient with respect to the input), I get an error which renders the former solution wrong/inapplicable.

That is:

imgs.grad.requires_grad = True

produces the error:

AttributeError: 'NoneType' object has no attribute 'requires_grad'
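
A minimal sketch of what's going on here (my own illustration, not from the thread): .grad is only populated for leaf tensors that require gradients, so for imgs coming straight from the dataloader it stays None:

import torch

# Minimal repro sketch: .grad is only populated for leaf tensors with
# requires_grad=True; for everything else it remains None.
x = torch.rand(4, 784)                        # like imgs: requires_grad=False
w = torch.rand(784, 512, requires_grad=True)  # stands in for the encoder weights
y = (x @ w).relu()
y.backward(torch.ones_like(y))                # backward itself runs fine...
print(x.grad)                                 # ...prints None, hence the AttributeError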

I also tried the second method suggested in that thread, which is as follows:

class Contractive_Encoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(784, 512)
        
    def forward(self, input):
        # flatten the input
        input = input.view(input.size(0), -1)
        output_e = F.relu(self.encoder(input))
        return output_e

class Contractive_Decoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.decoder = nn.Linear(512, 784)

    def forward(self, input):
        # flatten the input
        output = F.sigmoid(self.decoder(input))
        output = output.view(-1,1,28,28)
        return output


epochs = 50 
interval = 2000
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model_enc = Contractive_Encoder().to(device)
model_dec = Contractive_Decoder().to(device)

optimizer = optim.Adam([{"params":model_enc.parameters()},
                        {"params":model_dec.parameters()}], lr =0.001)

optimizer_cond = optim.Adam(model_enc.parameters(), lr = 0.001)

criterion = nn.MSELoss()

for e in range(epochs):
    for i, (imgs, labels) in enumerate(dataloader_train):
        imgs = imgs.to(device)
        labels = labels.to(device)

        outputs_e = model_enc(imgs)
        outputs = model_dec(outputs_e)
        loss_rec = criterion(outputs, imgs)
        optimizer.zero_grad()
        loss_rec.backward()
        optimizer.step()

        imgs.requires_grad_(True)
        y = model_enc(imgs)
        optimizer_cond.zero_grad()
        y.backward(torch.ones(imgs.view(-1,28*28).size()))

        imgs.grad.requires_grad = True
        loss = torch.mean([pow(imgs.grad,2)])
        optimizer_cond.zero_grad()
        loss.backward()
        optimizer_cond.step()
        
        if i%interval: 
            print('')

    print(f'epoch/epochs: {e}/{epochs} loss : {loss.item():.4f} ')

but I face the error:

RuntimeError: invalid gradient at index 0 - got [128, 784] but expected shape compatible with [128, 512]
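
For context (my own note, not part of the thread): backward()'s gradient argument must match the shape of the tensor it is called on; here y = model_enc(imgs) has shape [128, 512], while the ones tensor has the input's shape [128, 784]. A tiny sketch with these shapes:

import torch

x = torch.rand(128, 784, requires_grad=True)
y = x @ torch.rand(784, 512)        # y.shape == [128, 512]
y.backward(torch.ones(128, 512))    # OK: the gradient matches y's shape
# y.backward(torch.ones(128, 784))  # RuntimeError: invalid gradient at index 0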

How should I solve this in PyTorch?


2 Answers


Summary

The final implementation of the contractive loss that I wrote is as follows:

def loss_function(output_e, outputs, imgs, lamda = 1e-4, device=torch.device('cuda')):

    criterion = nn.MSELoss()
    assert outputs.shape == imgs.shape ,f'outputs.shape : {outputs.shape} != imgs.shape : {imgs.shape}'
    loss1 = criterion(outputs, imgs)

    output_e.backward(torch.ones(output_e.size()).to(device), retain_graph=True)
    # Frobenius norm: the square root of the sum of the squares of all
    # the entries of the Jacobian matrix
    loss2 = torch.sqrt(torch.sum(torch.pow(imgs.grad,2)))
    imgs.grad.data.zero_()
    loss = loss1 + (lamda*loss2) 
    return loss 

and in the training loop you need to do:

lam = 1e-4  # weight of the contractive term

for e in range(epochs):
    for i, (imgs, labels) in enumerate(dataloader_train):
        imgs = imgs.to(device)
        labels = labels.to(device)

        imgs.requires_grad_(True)  # must come before retain_grad(), which needs requires_grad=True
        imgs.retain_grad()

        outputs_e, outputs = model(imgs)
        loss = loss_function(outputs_e, outputs, imgs, lam, device)

        imgs.requires_grad_(False)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f'epoch/epochs: {e}/{epochs} loss: {loss.item():.4f}')
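
As a side note (my own sketch, not part of the original answer): the same vector-Jacobian product can be computed with torch.autograd.grad instead of mutating imgs.grad, and create_graph=True keeps the penalty itself differentiable with respect to the encoder weights:

import torch
import torch.nn as nn

def loss_function_alt(output_e, outputs, imgs, lamda=1e-4):
    # Reconstruction term, identical to loss1 above
    loss1 = nn.MSELoss()(outputs, imgs)
    # Vector-Jacobian product of the code w.r.t. the input, computed
    # functionally; no manual imgs.grad bookkeeping is needed.
    grads = torch.autograd.grad(outputs=output_e,
                                inputs=imgs,
                                grad_outputs=torch.ones_like(output_e),
                                create_graph=True,
                                retain_graph=True)[0]
    loss2 = torch.sqrt(torch.sum(grads ** 2))
    return loss1 + lamda * loss2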

Full explanation

As it turns out, and as @akshayk07 rightly pointed out in the comments, the implementation found in the PyTorch forum was wrong in multiple places. Notably, it wasn't implementing the actual contractive loss introduced in Contractive Auto-Encoders: Explicit Invariance During Feature Extraction! And aside from that, the implementation wouldn't work at all, for reasons explained below.

The changes are self-explanatory, but I'll try to explain what is going on here. First of all, note that imgs is not a leaf node, so the gradients would not be retained in imgs' .grad attribute.

In order to retain gradients for non-leaf nodes, you should use retain_grad(); .grad is only populated for leaf tensors. Also, imgs.retain_grad() should be called before doing the forward() pass, as it instructs autograd to store gradients into non-leaf nodes.
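
A tiny sketch of the leaf/non-leaf distinction (my own illustration):

import torch

x = torch.rand(3, requires_grad=True)  # leaf tensor
h = x * 2                              # non-leaf: result of an operation
h.retain_grad()                        # ask autograd to also store h's grad
h.sum().backward()
print(x.grad)                          # populated: x is a leaf
print(h.grad)                          # populated only because of retain_grad()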

Update

Thanks to @Michael for pointing out that the correct calculation of the Frobenius norm is actually (from ScienceDirect):

the square root of the sum of the squares of all the matrix entries

and not

the square root of the sum of the absolute values of all the matrix entries, as explained here
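
A quick numerical check of that definition (my own sketch): the expression used for loss2 agrees with PyTorch's built-in Frobenius norm:

import torch

J = torch.rand(128, 784)  # stand-in for the gradient matrix stored in imgs.grad
frob_manual  = torch.sqrt(torch.sum(torch.pow(J, 2)))
frob_builtin = torch.norm(J, p='fro')
print(torch.allclose(frob_manual, frob_builtin))  # True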

answered 2019-10-07T08:17:19.400

The main challenge in implementing the contractive autoencoder is calculating the Frobenius norm of the Jacobian, which is the gradient of the code or bottleneck layer (vector) with respect to the input layer (vector). This is the regularization term in the loss function. Fortunately, you have worked this out for me. Thank you! You are using the MSE loss for the first term. Cross-entropy loss is sometimes used instead; it's worth considering. I think you are almost there with the Frobenius norm, except that you need to take the square root of the sum of the squares of the Jacobian, whereas you are calculating the square root of the sum of the absolute values. Here's how I define the loss function (sorry, I changed the notation a little to keep myself straight):

def cae_loss_fcn(code, img_out, img_in, lamda=1e-4, device=torch.device('cuda')):

    # First term in the loss function, for ensuring representational fidelity
    criterion=nn.MSELoss()
    assert img_out.shape == img_in.shape, f'img_out.shape : {img_out.shape} != img_in.shape : {img_in.shape}'
    loss1 = criterion(img_out, img_in)

    # Second term in the loss function, for enforcing contraction of representation
    code.backward(torch.ones(code.size()).to(device), retain_graph=True)
    # Frobenius norm of Jacobian of code with respect to input image
    loss2 = torch.sqrt(torch.sum(torch.pow(img_in.grad, 2))) # THE CORRECTION
    img_in.grad.data.zero_()

    # Total loss, the sum of the two loss terms, with weight applied to second term
    loss = loss1 + (lamda*loss2)

    return loss
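
For completeness, a hypothetical usage sketch (mine, not part of the answer) wiring cae_loss_fcn into the training setup from the question, reusing device, epochs, and dataloader_train from above; nn.BCELoss() could stand in for nn.MSELoss() in the first term if you prefer the cross-entropy variant mentioned above, since the decoder output already goes through a sigmoid:

model = Contractive_AutoEncoder().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

for e in range(epochs):
    for imgs, _ in dataloader_train:
        imgs = imgs.to(device)
        imgs.requires_grad_(True)  # so img_in.grad gets populated by code.backward()
        imgs.retain_grad()

        code, img_out = model(imgs)
        loss = cae_loss_fcn(code, img_out, imgs, lamda=1e-4, device=device)

        imgs.requires_grad_(False)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()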
answered 2019-11-06T17:15:19.640