
In Python, given a 2-D tensor, we can use tensor[:,:2] to slice out the 2x2 block formed by the first two elements in the top-left corner of the matrix, for example:

import torch

x = torch.tensor([[-1.4673,  0.9980, -2.1427, -1.1798, -0.0646, -0.2635, -2.8930, -0.2563,
          0.4559, -0.7947, -0.4540,  3.3224,  0.2295,  5.5568, -8.0451, -2.4529,
          4.8724,  2.1640,  3.3255,  0.6693, -1.2362,  4.4713, -3.5547, -0.0528,
          0.1031, -1.2472, -1.6014,  1.8134],
        [ 2.1636, -1.1497, -5.0298,  2.8261, -0.5684,  0.6389,  2.9009, -5.1609,
          1.7358, -3.1819, -0.9877,  5.5598,  6.7142,  4.5704, -1.2683, -5.3046,
          3.0454,  3.2757, -3.2541,  3.6619, -3.6391, -0.2002,  5.7175,  5.7130,
          0.6632, -0.0744, -0.3502,  4.8116]])

y, z = x[:,:2].chunk(2,1)

print(y)

print(z)

[Out]:

tensor([[-1.4673],
        [ 2.1636]])
tensor([[ 0.9980],
        [-1.1497]])

What is the correct way to do this in C++ with PyTorch's ATen?

For example, in the LSTM at https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L253 there is a gates.chunk(4, 1) call.

If I want to do gates[:,:2].chunk(2,1) to extract different parts of the gates, for example auto partial_gates = gates[:,:2].chunk(4, 1);, how can I do that?

template <typename cell_params>
struct LSTMCell : Cell<std::tuple<Tensor, Tensor>, cell_params> {
  using hidden_type = std::tuple<Tensor, Tensor>;
  hidden_type operator()(const Tensor& input, const hidden_type& hidden, const cell_params& params) const override {
    auto hx = std::get<0>(hidden);
    auto cx = std::get<1>(hidden);

    if (input.is_cuda()) {
      auto igates = params.matmul_ih(input);
      auto hgates = params.matmul_hh(hx);
      auto result = at::_thnn_fused_lstm_cell(igates, hgates, cx, params.b_ih, params.b_hh);
      // Slice off the workspace argument (it's needed only for AD).
      return std::make_tuple(std::get<0>(result), std::get<1>(result));
    }

    auto gates = params.linear_ih(input) + params.linear_hh(hx);
    auto chunked_gates = gates.chunk(4, 1);

    auto partial_gates = gates[:,:2].chunk(4, 1);

    auto ingate = chunked_gates[0].sigmoid();
    auto forgetgate = chunked_gates[1].sigmoid();
    auto cellgate = chunked_gates[2].tanh();
    auto outgate = chunked_gates[3].sigmoid();

    auto cy = (forgetgate * cx) + (ingate * cellgate);
    auto hy = outgate * cy.tanh();

    return std::make_tuple(hy, cy);
  }
};

2 Answers


1. You can also use .slice:

Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t step)

auto partial_gates = gates.slice(1, 0, 2).chunk(4, 1);
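
A minimal, self-contained sketch of how the slice-then-chunk call fits together (assuming libtorch is available; the tensor contents are placeholders, and chunk(2, 1) is used here since the sliced tensor only has two columns):

#include <torch/torch.h>
#include <iostream>

int main() {
  // Stand-in for `gates`: a 2x28 tensor of random values.
  auto gates = torch::randn({2, 28});

  // slice(dim, start, end) uses an exclusive end, so slice(1, 0, 2)
  // is the equivalent of the Python expression gates[:, :2].
  auto partial = gates.slice(1, 0, 2);   // shape [2, 2]
  auto halves = partial.chunk(2, 1);     // two tensors of shape [2, 1]

  std::cout << halves[0] << "\n" << halves[1] << std::endl;
}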

2. Since PyTorch 1.5, you can use Tensor::index and Tensor::index_put_:

using namespace torch::indexing;
auto partial_gates = gates.index({"...", Slice(None, 2)}).chunk(4, 1); 
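
A similar sketch using the indexing API (again assuming libtorch 1.5 or later; the tensor is placeholder data):

#include <torch/torch.h>

using namespace torch::indexing;

int main() {
  auto gates = torch::randn({2, 28});  // placeholder data

  // "..." keeps all leading dimensions, Slice(None, 2) is Python's :2,
  // so this selects gates[..., :2]; chunk(2, 1) then splits the two columns.
  auto partial_gates = gates.index({"...", Slice(None, 2)}).chunk(2, 1);
}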

Multi-dimensional indexing is also supported.

General translation from Python indexing to Tensor::index and Tensor::index_put_:

Python             C++ (assuming `using namespace torch::indexing`)
-------------------------------------------------------------------
0                  0
None               None
...                "..." or Ellipsis
:                  Slice()
start:stop:step    Slice(start, stop, step)
True / False       true / false
[[1, 2]]           torch::tensor({{1, 2}})
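
To illustrate a few rows of the table, here is a small sketch (the tensor and the particular index expressions are invented for demonstration):

#include <torch/torch.h>

using namespace torch::indexing;

int main() {
  auto t = torch::arange(24).reshape({2, 3, 4});

  // Python: t[0, :, 1:4:2]
  auto a = t.index({0, Slice(), Slice(1, 4, 2)});

  // Python: t[..., None]  (insert a trailing dimension)
  auto b = t.index({"...", None});

  // Python: t[[[0, 1]]]  (advanced indexing with a tensor index)
  auto c = t.index({torch::tensor({{0, 1}})});
}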
Answered 2020-02-04T14:36:11.793

Use .narrow(), from https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/TensorShape.cpp#L364:

auto partial_gates = gates.narrow(1,0,2).chunk(4, 1);
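
And a short comparison sketch of narrow versus slice (placeholder data; both calls select the equivalent of gates[:, :2]):

#include <torch/torch.h>

int main() {
  auto gates = torch::randn({2, 28});  // placeholder data

  // narrow(dim, start, length) takes a *length*, while
  // slice(dim, start, end) takes an exclusive *end*.
  auto a = gates.narrow(1, 0, 2);  // shape [2, 2]
  auto b = gates.slice(1, 0, 2);   // shape [2, 2]

  auto partial_gates = a.chunk(2, 1);  // two tensors of shape [2, 1]
}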
Answered 2019-06-21T20:45:54.843