I am new to JAX and not a Python expert either.
I am running jax version "0.2.14" on my Mac laptop. Please find below a simple piece of code that, at least for me, produces some results.
However, as mentioned in the comment inside the jax_metropolis_sampler function, I would like to save the intermediate "position" results, but I do not know how to do that correctly with jax.lax.fori_loop, and what I did instead is surely horrible.
I am pretty sure someone can give me a better solution that makes better use of JAX's parallelism (after the code I sketch the direction I was imagining, but I am not sure it is right). For the moment I have not looked into forward/backward differentiation of MixtureModel_jax.
Thanks in advance.
import jax
import jax.numpy as jnp
from functools import partial
class MixtureModel_jax():
    def __init__(self, locs, scales, weights, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.loc = jnp.array([locs]).T
        self.scale = jnp.array([scales]).T
        self.weights = jnp.array([weights]).T
        norm = jnp.sum(self.weights)
        self.weights = self.weights / norm
        self.num_distr = len(locs)

    def pdf(self, x):
        probs = jax.scipy.stats.norm.pdf(x, loc=self.loc, scale=self.scale)
        return jnp.dot(self.weights.T, probs).squeeze()

    def logpdf(self, x):
        log_probs = jax.scipy.stats.norm.logpdf(x, loc=self.loc, scale=self.scale)
        return jax.scipy.special.logsumexp(jnp.log(self.weights) + log_probs, axis=0)
@partial(jax.jit, static_argnums=(1,))
def jax_metropolis_kernel(rng_key, logpdf, position, log_prob):
    """Moves the chain by one step using the Random Walk Metropolis algorithm."""
    key, subkey = jax.random.split(rng_key)
    move_proposals = jax.random.normal(key, shape=position.shape) * 0.1
    proposal = position + move_proposals
    proposal_log_prob = logpdf(proposal)

    log_uniform = jnp.log(jax.random.uniform(subkey))
    do_accept = log_uniform < proposal_log_prob - log_prob

    position = jnp.where(do_accept, proposal, position)
    log_prob = jnp.where(do_accept, proposal_log_prob, log_prob)
    return position, log_prob
@partial(jax.jit, static_argnums=(1, 2))
def jax_metropolis_sampler(rng_key, n_samples, logpdf, initial_position):
    """Generate samples using the Random Walk Metropolis algorithm."""
    def mh_update(i, state):
        key, position, log_prob = state
        _, key = jax.random.split(key)
        new_position, new_log_prob = jax_metropolis_kernel(key, logpdf, position, log_prob)
        return (key, new_position, new_log_prob)

    logp = logpdf(initial_position)

    # Just return the last position:
    # rng_key, position, log_prob = jax.lax.fori_loop(0, n_samples,
    #                                                 mh_update,
    #                                                 (rng_key, initial_position, logp))
    # return position

    # Proposal to save the intermediate positions: slow and horrible, I guess!
    spls = []
    state = (rng_key, initial_position, logp)
    for i in range(n_samples):
        state = mh_update(i, state)
        spls.append(state[1])
    return spls
mixture_gaussian_model = MixtureModel_jax([0, 1.5], [0.5, 0.1], [8, 2])

n_dim = 1
n_samples = 50
n_chains = 7

rng_key = jax.random.PRNGKey(42)
rng_keys = jax.random.split(rng_key, n_chains)
initial_position = jnp.zeros((n_dim, n_chains))

run_mcmc = jax.vmap(jax_metropolis_sampler,
                    in_axes=(0, None, None, 1),
                    out_axes=0)
positions = run_mcmc(rng_keys, n_samples,
                     lambda x: mixture_gaussian_model.logpdf(x),
                     initial_position)
print(len(positions))
print(positions[0].shape)
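For reference, here is the direction I was imagining instead of the Python loop, but I am not sure it is correct or idiomatic. As far as I understand, jax.lax.fori_loop only threads a single carry through the iterations and returns the final value, while jax.lax.scan additionally stacks a per-iteration output, which seems to be exactly what I need to keep the intermediate positions. The sketch below reuses the jax_metropolis_kernel defined above; the name jax_metropolis_sampler_scan is just mine for illustration.

def jax_metropolis_sampler_scan(rng_key, n_samples, logpdf, initial_position):
    """Like jax_metropolis_sampler, but returns every intermediate position."""
    def mh_step(state, _):
        key, position, log_prob = state
        _, key = jax.random.split(key)
        new_position, new_log_prob = jax_metropolis_kernel(key, logpdf,
                                                           position, log_prob)
        # scan stacks the second element of the returned pair over all steps
        return (key, new_position, new_log_prob), new_position

    logp = logpdf(initial_position)
    _, positions = jax.lax.scan(mh_step, (rng_key, initial_position, logp),
                                None, length=n_samples)
    return positions  # shape: (n_samples,) + initial_position.shape

If this is the right tool, I assume it could be vmapped over chains exactly like my original run_mcmc call (same in_axes=(0, None, None, 1)), so that positions becomes a single array of shape (n_chains, n_samples, n_dim) instead of a Python list. Is jax.lax.scan indeed the recommended way to do this, or is there something better?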