
I don't know why I am getting this error. I saw a few posts that suggest changing state_is_tuple=False, but that only gave me different errors. I think the problem is in how I define the LSTM cells, but I am not sure what I should change. I followed this link, which has a similar code structure.
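
As far as I understand it, the change those posts suggest is just passing state_is_tuple=False when building the cells, roughly like this (not in my code below; when I tried it, it only produced other errors):

    # what those posts suggest, as far as I understand it
    lstm_cell = rnn.BasicLSTMCell(n_hidden, state_is_tuple=False)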

Here is my code:

    # Required placeholders
    # (this snippet assumes `tf`, `rnn` = tf.contrib.rnn and
    #  `layers_core` = tensorflow.python.layers.core are imported at the top of the file)
    n_hidden = args.rnn_size
    n_layers = args.num_layers
    max_sequence_length = args.max_sequence_length
    encoderEmbeddingsize = args.encoderEmbeddingsize
    decoderEmbeddingsize = args.decoderEmbeddingsize
    queVocabsize = len(question_vocab_to_int)
    ansVocabsize = len(answer_vocab_to_int)
    batch_size = args.batch_size
    # Input Embedding for Encoder ## CHECK THE VOCAB SIZE!!!

    encoder_input = tf.contrib.layers.embed_sequence(input_data, queVocabsize, encoderEmbeddingsize,
                                              initializer=tf.random_uniform_initializer(0, 1))

    print('encoder_input', encoder_input)

    # Layers for the model
    lstm_cell = rnn.BasicLSTMCell(n_hidden) # lstm layer
    dropout = rnn.DropoutWrapper(lstm_cell, input_keep_prob=keep_prob) # dropout layer

    # Encoder Model

    # Make two layer encoder
    encoder_multirnn_cell = rnn.MultiRNNCell([dropout]*n_layers)

    # Make it bidirectional
    print(sequence_length)
    encoder_output, encoder_state = tf.nn.dynamic_rnn(encoder_multirnn_cell,
                                                        inputs=encoder_input, dtype=tf.float32) # sequence_length=sequence_length,

    print('encoder_output', encoder_output)
    print('encoder_state', encoder_state)
    # preprocessing the decoder input (drop the last target token, prepend <GO>)
    initial_tensor = tf.strided_slice(target, [0, 0], [batch_size, -1], [1, 1])

    decoder_input = tf.concat([tf.fill([batch_size, 1], question_vocab_to_int['<GO>']), initial_tensor], 1)

    print('decoder_input', decoder_input)
    ## Input Embedding for the Decoder
    decoder_embedding = tf.Variable(tf.random_uniform([queVocabsize+1, decoderEmbeddingsize], 0, 1))
    decoder_embedded_input = tf.nn.embedding_lookup(decoder_embedding, decoder_input)
    print('check')
    print(decoder_embedded_input)
    print(decoder_embedding)
    ## Decoder Model
    #with tf.variable_scope("decoding") as decoding_scope:
    lstm_decoder_cell = rnn.BasicLSTMCell(n_hidden)  # lstm layer
    dropout_decoder = rnn.DropoutWrapper(lstm_decoder_cell, input_keep_prob=keep_prob)  # dropout layer

    # decoder

    # Make two layer decoder
    decoder_multirnn_cell = rnn.MultiRNNCell([dropout_decoder] * n_layers)

  #  weights = tf.truncated_normal_initializer(stddev=0.1)
   # biases = tf.zeros_initializer()

    output_layer_function = layers_core.Dense(ansVocabsize, use_bias=False)
    # previously: lambda x: tf.contrib.layers.fully_connected(x, queVocabsize, scope=decoding_scope,
    #                                                         weights_initializer=weights,
    #                                                         biases_initializer=biases)
    #print(decoder_multirnn_cell.output_size)
    #decoding_scope.reuse_variables()

    print('output_layer_function', output_layer_function)

    # training vs inference!
    encoder_output = tf.transpose(encoder_output, [1, 0, 2])
    attention_state = tf.zeros([batch_size, 1, decoder_multirnn_cell.output_size * 2])

    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        num_units=decoder_multirnn_cell.output_size, memory=encoder_output)

    lstm_decoder_cell = tf.contrib.seq2seq.AttentionWrapper(lstm_decoder_cell,
                                                            attention_mechanism=attention_mechanism)

    attn_zero = lstm_decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32)
    init_state = attn_zero.clone(cell_state=encoder_state)

    print(('sequence!!!!!!!!1', sequence_length))
    helper = tf.contrib.seq2seq.TrainingHelper(decoder_embedded_input, sequence_length)

    # decoder
    decoder = tf.contrib.seq2seq.BasicDecoder(lstm_decoder_cell, helper, initial_state=init_state,
                                              output_layer= output_layer_function)

    print(decoder)
    final_outputs, _final_state, _final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(decoder)

    train_pred_drop = tf.nn.dropout(final_outputs, keep_prob)
    logits = train_pred_drop.rnn_output
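
For reference, this is the wiring pattern I was trying to reproduce, as a minimal standalone sketch with single-layer cells on both sides and made-up sizes (every `demo_*` name is just a placeholder, not from my actual model):

    import tensorflow as tf
    from tensorflow.contrib import rnn
    from tensorflow.python.layers import core as layers_core

    demo_batch, demo_time, demo_units, demo_vocab = 2, 5, 8, 20

    # fake batch-major inputs, shaped [batch, time, depth]
    demo_enc_inputs = tf.random_uniform([demo_batch, demo_time, demo_units])
    demo_dec_inputs = tf.random_uniform([demo_batch, demo_time, demo_units])
    demo_lengths = tf.fill([demo_batch], demo_time)

    # single-layer encoder: its final state is one LSTMStateTuple
    enc_cell = rnn.BasicLSTMCell(demo_units)
    enc_out, enc_state = tf.nn.dynamic_rnn(enc_cell, demo_enc_inputs, dtype=tf.float32)

    # attention over the batch-major encoder outputs
    attention = tf.contrib.seq2seq.BahdanauAttention(num_units=demo_units, memory=enc_out)
    dec_cell = tf.contrib.seq2seq.AttentionWrapper(rnn.BasicLSTMCell(demo_units), attention)

    # zero_state() gives an AttentionWrapperState; clone() swaps in the encoder
    # state, which has to have the same structure as the wrapped decoder cell's state
    init_state = dec_cell.zero_state(demo_batch, tf.float32).clone(cell_state=enc_state)

    helper = tf.contrib.seq2seq.TrainingHelper(demo_dec_inputs, demo_lengths)
    decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, initial_state=init_state,
                                              output_layer=layers_core.Dense(demo_vocab, use_bias=False))
    demo_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder)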

Now I get the following error from tf.contrib.seq2seq.dynamic_decode(decoder):

Traceback (most recent call last):
  File "test_model.py", line 272, in <module>
    train_logits, infer_logits = load_model(args, tf.reverse(input_data, [-1]), target, learning_rate, sequence_length, question_vocab_to_int, answer_vocab_to_int, keep_prob ) ## load model here!
  File "test_model.py", line 165, in load_model
    final_outputs, _final_state, _final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(decoder)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/contrib/seq2seq/python/ops/decoder.py", line 286, in dynamic_decode
    swap_memory=swap_memory)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2816, in while_loop
    result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2640, in BuildLoop
    pred, body, original_loop_vars, loop_vars, shape_invariants)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2590, in _BuildLoop
    body_result = body(*packed_vars_for_body)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/contrib/seq2seq/python/ops/decoder.py", line 234, in body
    decoder_finished) = decoder.step(time, inputs, state)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/contrib/seq2seq/python/ops/basic_decoder.py", line 138, in step
    cell_outputs, cell_state = self._cell(inputs, state)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 183, in __call__
    return super(RNNCell, self).__call__(inputs, state)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/layers/base.py", line 575, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py", line 1295, in call
    cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 183, in __call__
    return super(RNNCell, self).__call__(inputs, state)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/layers/base.py", line 575, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 438, in call
    self._linear = _Linear([inputs, h], 4 * self._num_units, True)
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1154, in __init__
    shapes = [a.get_shape() for a in args]
  File "/home/saurabh/tfnightly/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1154, in <listcomp>
    shapes = [a.get_shape() for a in args]
AttributeError: 'LSTMStateTuple' object has no attribute 'get_shape'
