您可以实现一个自定义层,类似于 nn.Linear:
import math
import torch
from torch import nn
class ElementWiseLinear(nn.Module):
__constants__ = ['n_features']
n_features: int
weight: torch.Tensor
def __init__(self, n_features: int, bias: bool = True) -> None:
super(ElementWiseLinear, self).__init__()
self.n_features = n_features
self.weight = nn.Parameter(torch.Tensor(1, n_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(n_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = torch.mul(input, self.weight)
if self.bias is not None:
output += self.bias
return output
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.n_features, self.n_features, self.bias is not None
)
并像这样使用它:
x = torch.rand(3)
layer = ElementWiseLinear(3, bias=False)
output = layer(x)
当然,你也可以把它写得比这更简单 :)