0

我正在尝试使用tensorflow_probability构建一个 mcmc 链。这是我的代码:

    chain_states, kernel_results = tfp.mcmc.sample_chain(
    num_results=tf.constant(1e3, dtype=tf.int32),
    num_burnin_steps=tf.constant(1e2, dtype=tf.int32),
    parallel_iterations=tf.constant(10, dtype=tf.int32),
    current_state=current_state,
    kernel=tfp.mcmc.MetropolisHastings(
        inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=joint_log_prob,
        num_leapfrog_steps=tf.constant(2, dtype=tf.int32),
        step_size=tf.Variable(1.),
        step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy()
        )))

但我得到了这个错误::

>     InvalidArgumentError                      Traceback (most recent call last) <ipython-input-13-7e972cc65053> in <module>()
> ----> 1 make_model(well_complex, well_ligand, fi_complex, fi_ligand)
> 
> ~/Documents/GitHub/assaytools2/assaytools2/assaytools2/inference.py in
> make_model(well_complex, well_ligand, fi_complex, fi_ligand)
>     162             num_leapfrog_steps=tf.constant(2, dtype=tf.int32),
>     163             step_size=tf.Variable(1.),
> --> 164             step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy()
>     165             )))
>     166 
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/sample.py
> in sample_chain(num_results, current_state, previous_kernel_results,
> kernel, num_burnin_steps, num_steps_between_results,
> parallel_iterations, name)
>     238 
>     239     if previous_kernel_results is None:
> --> 240       previous_kernel_results = kernel.bootstrap_results(current_state)
>     241     return tf.scan(
>     242         fn=_scan_body,
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/metropolis_hastings.py
> in bootstrap_results(self, init_state)
>     261         name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'),
>     262         values=[init_state]):
> --> 263       pkr = self.inner_kernel.bootstrap_results(init_state)
>     264       if not has_target_log_prob(pkr):
>     265         raise ValueError(
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/hmc.py
> in bootstrap_results(self, init_state)
>     506   def bootstrap_results(self, init_state):
>     507     """Creates initial `previous_kernel_results` using a supplied `state`."""
> --> 508     kernel_results = self._impl.bootstrap_results(init_state)
>     509     if self.step_size_update_fn is not None:
>     510       step_size_assign = self.step_size_update_fn(self.step_size, None)  # pylint:
> disable=not-callable
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/metropolis_hastings.py
> in bootstrap_results(self, init_state)
>     261         name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'),
>     262         values=[init_state]):
> --> 263       pkr = self.inner_kernel.bootstrap_results(init_state)
>     264       if not has_target_log_prob(pkr):
>     265         raise ValueError(
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/hmc.py
> in bootstrap_results(self, init_state)
>     672           init_target_log_prob,
>     673           init_grads_target_log_prob,
> --> 674       ] = mcmc_util.maybe_call_fn_and_grads(self.target_log_prob_fn, init_state)
>     675       return UncalibratedHamiltonianMonteCarloKernelResults(
>     676           log_acceptance_correction=tf.zeros_like(init_target_log_prob),
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/util.py
> in maybe_call_fn_and_grads(fn, fn_arg_list, result, grads,
> check_non_none_grads, name)
>     232     fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
>     233                    else [fn_arg_list])
> --> 234     result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
>     235     if not all(r.dtype.is_floating
>     236                for r in (result if is_list_like(result) else [result])):  # pylint: disable=superfluous-parens
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/util.py
> in _value_and_gradients(fn, fn_arg_list, result, grads, name)
>     207         ]
>     208       else:
> --> 209         grads = tfe.gradients_function(fn)(*fn_arg_list)
>     210     else:
>     211       if is_list_like(result) and len(result) == len(fn_arg_list):
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in decorated(*args, **kwds)
>     368     """Computes the gradient of the decorated function."""
>     369 
> --> 370     _, grad = val_and_grad_function(f, params=params)(*args, **kwds)
>     371     return grad
>     372 
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in decorated(*args, **kwds)
>     469                        "receive keyword arguments.")
>     470     val, vjp = make_vjp(f, params)(*args, **kwds)
> --> 471     return val, vjp(dy=dy)
>     472 
>     473   return decorated
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in vjp(dy)
>     539       return imperative_grad.imperative_grad(
>     540           _default_vspace, this_tape, nest.flatten(result), sources,
> --> 541           output_gradients=dy)
>     542     return result, vjp
>     543 
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/imperative_grad.py
> in imperative_grad(vspace, tape, target, sources, output_gradients)
>      61   """
>      62   return pywrap_tensorflow.TFE_Py_TapeGradient(
> ---> 63       tape._tape, vspace, target, sources, output_gradients)  # pylint: disable=protected-access
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in _gradient_function(op_name, attr_tuple, num_inputs, inputs,
> outputs, out_grads)
>     115     return [None] * num_inputs
>     116 
> --> 117   return grad_fn(mock_op, *out_grads)
>     118 
>     119 
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py
> in _ProdGrad(op, grad)
>     158   with ops.device("/cpu:0"):
>     159     rank = array_ops.rank(op.inputs[0])
> --> 160     reduction_indices = (reduction_indices + rank) % rank
>     161     reduced = math_ops.cast(reduction_indices, dtypes.int32)
>     162     idx = math_ops.range(0, rank)
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py
> in binary_op_wrapper(x, y)
>     860     with ops.name_scope(None, op_name, [x, y]) as name:
>     861       if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
> --> 862         return func(x, y, name=name)
>     863       elif not isinstance(y, sparse_tensor.SparseTensor):
>     864         try:
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py
> in add(x, y, name)
>     322       else:
>     323         message = e.message
> --> 324       _six.raise_from(_core._status_to_exception(e.code, message), None)
>     325 
>     326 
> 
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/six.py in
> raise_from(value, from_value)
> 
> InvalidArgumentError: cannot compute Add as input #0(zero-based) was
> expected to be a int32 tensor but is a int64 tensor [Op:Add] name:
> mcmc_sample_chain/mh_bootstrap_results/mh_bootstrap_results/hmc_kernel_bootstrap_results/maybe_call_fn_and_grads/value_and_gradients/add/

I doubled checked and none of my initial tensors were of integer type.

I wonder where I did it wrong.

Thanks!
4

0 回答 0