
I am working on an LSTM encoder-decoder sequence-to-sequence prediction problem in PyTorch. As a first step, I want to forecast 2D trajectories (trajectory x, trajectory y) from multivariate input - 2D or more (trajectory x, trajectory y, speed, rotation, etc.).
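For concreteness, the kind of data I have in mind looks roughly like this (toy shapes only; the feature layout is just an illustration):

import torch

seq_len_in, seq_len_out, batch = 20, 10, 4   # toy sizes, not my real ones
n_features = 4                               # e.g. x, y, speed, rotation

inputs  = torch.randn(seq_len_in,  batch, n_features)  # observed multivariate sequence
targets = torch.randn(seq_len_out, batch, 2)           # future (x, y) positions to predict
print(inputs.shape, targets.shape)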

I am following the notebook below (link):

Seq2seq with attention

Here is an excerpt from it (encoder, decoder, attention):

# imports assumed by the excerpt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1):
        super(EncoderRNN, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.dropout = dropout

        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)

    def forward(self, input_seqs, input_lengths, hidden=None):
        # Note: we run this all at once (over multiple batches of multiple sequences)
        embedded = self.embedding(input_seqs)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
        outputs, hidden = self.gru(packed, hidden)
        outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded)
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs
        return outputs, hidden


class LuongAttnDecoderRNN(nn.Module):
    def __init__(self, attn_model, hidden_size, output_size, n_layers=1, dropout=0.1):
        super(LuongAttnDecoderRNN, self).__init__()

        # Keep for reference
        self.attn_model = attn_model
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = dropout

        # Define layers
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.embedding_dropout = nn.Dropout(dropout)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout)
        self.concat = nn.Linear(hidden_size * 2, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)

        # Choose attention model
        if attn_model != 'none':
            self.attn = Attn(attn_model, hidden_size)

    def forward(self, input_seq, last_hidden, encoder_outputs):
        # Note: we run this one step at a time

        # Get the embedding of the current input word (last output word)
        batch_size = input_seq.size(0)
        embedded = self.embedding(input_seq)
        embedded = self.embedding_dropout(embedded)
        embedded = embedded.view(1, batch_size, self.hidden_size) # S=1 x B x N

        # Get current hidden state from input word and last hidden state
        rnn_output, hidden = self.gru(embedded, last_hidden)

        # Calculate attention from current RNN state and all encoder outputs;
        # apply to encoder outputs to get weighted average
        attn_weights = self.attn(rnn_output, encoder_outputs)
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x S=1 x N

        # Attentional vector using the RNN hidden state and context vector
        # concatenated together (Luong eq. 5)
        rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N
        context = context.squeeze(1)       # B x S=1 x N -> B x N
        concat_input = torch.cat((rnn_output, context), 1)
        concat_output = F.tanh(self.concat(concat_input))

        # Finally predict next token (Luong eq. 6, without softmax)
        output = self.out(concat_output)

        # Return final output, hidden state, and attention weights (for visualization)
        return output, hidden, attn_weights

To compute attention at the decoder stage, the encoder hidden state and encoder outputs are passed in and used as follows:

class Attn(nn.Module):
    def __init__(self, method, hidden_size):
        super(Attn, self).__init__()

        self.method = method
        self.hidden_size = hidden_size

        if self.method == 'general':
            self.attn = nn.Linear(self.hidden_size, hidden_size)

        elif self.method == 'concat':
            self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
            self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))

    def forward(self, hidden, encoder_outputs):
        max_len = encoder_outputs.size(0)
        this_batch_size = encoder_outputs.size(1)

        # Create variable to store attention energies
        attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S

        if USE_CUDA:
            attn_energies = attn_energies.cuda()

        # For each batch of encoder outputs
        for b in range(this_batch_size):
            # Calculate energy for each encoder output
            for i in range(max_len):
                attn_energies[b, i] = self.score(hidden[:, b], encoder_outputs[i, b].unsqueeze(0))

        # Normalize energies to weights in range 0 to 1, resize to 1 x B x S
        return F.softmax(attn_energies).unsqueeze(1)

    def score(self, hidden, encoder_output):

        if self.method == 'dot':
            energy = hidden.dot(encoder_output)
            return energy

        elif self.method == 'general':
            energy = self.attn(encoder_output)
            energy = hidden.dot(energy)
            return energy

        elif self.method == 'concat':
            energy = self.attn(torch.cat((hidden, encoder_output), 1))
            energy = self.v.dot(energy)
            return energy

My actual goal is to extend this approach by feeding additional information to the decoder, for example image data for each input time step. Technically, I want to use two (or more) encoders: one for the trajectories as in the linked notebook above, and another one for the image data (a convolutional encoder).

I do this by concatenating the embeddings produced by the trajectory encoder and the convolutional encoder (along with the cell states, etc.) and feeding the concatenated tensor to the decoder.

For example, concatenating an image embedding (a tensor of length 256) with a trajectory-data embedding (a tensor of length 256) yields an embedding of length 512.
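To make the concatenation step concrete, here is a minimal sketch with toy tensors (only the shapes matter; in my code the values come from the trajectory encoder and the convolutional encoder):

import torch

S, B = 10, 4                              # toy sequence length and batch size
traj_outputs = torch.randn(S, B, 256)     # per-step outputs of the trajectory encoder
conv_outputs = torch.randn(S, B, 256)     # per-step features from the convolutional encoder
traj_hidden  = torch.randn(1, B, 256)     # final hidden state of the trajectory encoder
conv_hidden  = torch.randn(1, B, 256)     # "hidden state" built from the image features

# Concatenate along the feature dimension: 256 + 256 = 512
encoder_outputs = torch.cat((traj_outputs, conv_outputs), dim=2)  # S x B x 512
decoder_hidden  = torch.cat((traj_hidden,  conv_hidden),  dim=2)  # 1 x B x 512
print(encoder_outputs.shape, decoder_hidden.shape)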

My question is: does the attention computation become a problem if I use the concatenated encoder hidden states, concatenated encoder cell states and concatenated encoder outputs from these different sources, instead of the hidden states, cell states and outputs from a single source?

What caveats or preprocessing are needed to make this work?
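What worries me concretely is the size mismatch: the Attn module above is built around a single hidden_size (256 in my case), while my concatenated states are 512-dimensional. A small standalone illustration of the mismatch I mean (toy snippet, not my actual code):

import torch
import torch.nn as nn

hidden_size = 256
general_attn = nn.Linear(hidden_size, hidden_size)   # like self.attn inside Attn('general', 256)

concat_encoder_output = torch.randn(1, 2 * hidden_size)  # one 512-dim concatenated vector
try:
    general_attn(concat_encoder_output)   # fails: the layer expects 256 input features
except RuntimeError as err:
    print(err)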

Thank you very much in advance.
