0
class pu_fc(nn.Module):

    def __init__(self, input_dim):
        super(pu_fc, self).__init__()
        self.input_dim = input_dim
        
        self.fc1 = nn.Linear(input_dim, 50)
        self.fc2 = nn.Linear(50, 2) 

        self.loss_fn = custom_NLL()

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.bias = torch.autograd.Variable(torch.rand(1,1), requires_grad=True).to(device)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out, inplace=True)
        out = self.fc2(out)
        out[..., 1] = out[..., 1] + self.bias
        print('bias: ', self.bias)

        return out

从代码中可以看出,我想在第二个输出通道中添加一个偏置项。但是,我的实现不起作用。偏置项根本不更新。它在训练期间保持不变,我认为它在训练期间是不可学习的。所以问题是我怎样才能让偏差项变得可学习?是否有可能做到这一点?下面是训练期间偏差的一些输出。任何提示不胜感激,在此先感谢!

bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
Current Epoch: 1
Epoch loss:  0.4424589276313782
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
Current Epoch: 2
Epoch loss:  0.3476297199726105
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
bias:  tensor([[0.0930]], device='cuda:0', grad_fn=<CopyBackwards>)
4

1 回答 1

1

bias应该是nn.Parameter一个. 作为参数意味着它会model.parameters()在调用时显示并自动传输到指定的设备model.to(device)

self.bias = nn.Parameter(torch.rand(1,1))

注意:不要使用Variable,它已被 2 多年前发布的 PyTorch 0.4.0 弃用,并且它的所有功能都已合并到张量中。

于 2020-06-20T14:30:27.803 回答