I have a hierarchical model for learning Bayesian neural networks with a single hidden layer. The network parameters fall into 4 groups: input-to-hidden and hidden-to-output weights and biases. A Gaussian prior is defined over each parameter group. The hyperparameters, i.e. the standard deviations of these priors, have Gamma distributions with alpha = 1 and beta = 1/60. The output noise is also Gaussian, with a Gamma(alpha = 1, beta = 200) prior over its standard deviation. The NUTS step method is used for sampling, with its scaling argument set to the MAP point of the parameters only (excluding the hyperparameters). The data are one-dimensional inputs from [0, 1], and the observations come from a simple one-dimensional sinusoidal function. I expect the set of sampled networks to interpolate the data and to start disagreeing/diverging as the distance from the observed points grows, producing a shape similar to what a Gaussian process model yields. Surprisingly, the result differs from what I expected; it looks as though some annoying constraint is preventing the sampler from doing well and sampling from the whole posterior:

(The red line is produced by the MAP network, the black line is the underlying function, and the 3 small red dots are the data.)

PyMC3 folks, do you have any explanation for the cause of this, and how can I fix it?
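For concreteness, here is the model in distributional notation (note that PyMC3's Gamma is parameterized by rate, so beta = 1/60 gives the weight/bias scales a prior mean of 60, while beta = 200 gives the noise scale a prior mean of 0.005):

sdG     ~ Gamma(alpha=1, beta=1/60)   for each group G in {wih, bih, who, bho}
sdNoise ~ Gamma(alpha=1, beta=200)
wih, bih, who, bho ~ Normal(0, sdG)   elementwise, per group
netOut(x) = dot(sigmoid(dot(x, wih) + bih), who) + bho
y       ~ Normal(netOut(x), sdNoise)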
import numpy as np
import theano
import theano.tensor as T
import pymc3 as pm
import matplotlib.pyplot as plt
import scipy.optimize  # find_MAP below uses scipy.optimize.fmin_l_bfgs_b
#
co = 3                # frequency of the underlying sine; other values: 4, 5, 6, 7, 8
numHiddenUnits = 100
numObservations = 3   # other values: 6, 7, 8
randomSeed = 1235
numSamples = 5500
def z_score(x, mean=None, std=None):
    if mean is None or std is None:
        mean, std = np.mean(x, axis=0), np.std(x, axis=0)
    # parentheses matter here: `x - mean/std` would only divide the mean
    return (x - mean) / std, mean, std
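# Quick sanity check (hypothetical values): the returned mean/std can be
# reused to put a test grid on the same scale as the training inputs.
_chk, _m, _s = z_score(np.array([[0.0], [0.5], [1.0]]))
_grid, _, _ = z_score(np.atleast_2d(np.linspace(0, 1, 5)).T, _m, _s)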
def sample(nHiddenUnts,X,Y):
'''
samples a set of ANNs from the posterior
'''
nFeatures = X.shape[1]
with pm.Model() as model:
#Gamma Hyperpriors
alpha,beta = 1.,1./60.
# standard deviation: Bias(Hidden-out)
bhoSd = pm.Gamma('bhoSd',alpha=alpha,beta=beta)
#standard deviation: Weights (Hidden-out)
whoSd = pm.Gamma('whoSd',alpha=alpha,beta=beta)
#standard deviation: Bias (input-hidden)
bihSd = pm.Gamma('bihSd',alpha=alpha,beta=beta)
#standard deviation: Weights (input-hidden)
wihSd = pm.Gamma('wihSd',alpha=alpha,beta=beta)
#standard deviation: output noise
noiseSd = pm.Gamma('noiseSd',alpha=alpha,beta=200.)
wihSd.tag.test_value= bihSd.tag.test_value= whoSd.tag.test_value= bhoSd.tag.test_value = 200
noiseSd.tag.test_value = 0.002
#priors
#Bias (HiddenOut)
bho = pm.Normal('bho',mu=0,sd=bhoSd)
bho.tag.test_value = 1
who = pm.Normal('who',mu=0,sd=whoSd,shape=(nHiddenUnts,1) )
who.tag.test_value = np.random.normal(size=nHiddenUnts,loc=0,scale=1).reshape(nHiddenUnts,1) #np.ones(shape=(nHiddenUnts,1))
#Bias input-hidden
bih = pm.Normal('bih',mu=0,sd=bihSd ,shape=nHiddenUnts)
bih.tag.test_value =np.random.normal(size=nHiddenUnts,loc=0,scale=1)#np.ones(shape=nHiddenUnts)
wih= pm.Normal('wih',mu=0,sd=wihSd ,shape= (nFeatures,nHiddenUnts))
wih.tag.test_value =np.random.normal(size=nFeatures*nHiddenUnts,loc=0,scale=1).reshape(nFeatures,nHiddenUnts)#np.ones(shape= (nFeatures,nHiddenUnts))
# network output: hidden = sigmoid(X.wih + bih), out = hidden.who + bho
netOut = T.dot(T.nnet.sigmoid(T.dot(X, wih) + bih), who) + bho
#likelihood
likelihood = pm.Normal('likelihood',mu=netOut,sd=noiseSd,observed= Y)
print("model built")
#==================================================================
start1 = pm.find_MAP(fmin=scipy.optimize.fmin_l_bfgs_b, vars=[bho,who,bih,wih],model=model)
#start2 = pm.find_MAP(start=start1, fmin=scipy.optimize.fmin_l_bfgs_b, vars=[noiseSd,wihSd,bihSd ,whoSd,bhoSd],model=model)
step = pm.NUTS(scaling=start1)
#step = pm.HamiltonianMC(scaling=start1,path_length=5.,step_scale=.05,)
trace = pm.sample(10,step,start=start1, progressbar=True,random_seed=1234)[:]
# re-initialize NUTS, scaling at the last point of the short warm-up run
step1 = pm.NUTS(scaling=trace[-1])
print('-')
trace = pm.sample(numSamples,step1,start=trace[-1], progressbar=True,random_seed=1234)[100:]
#========================================================================
return trace,start1
#underlying function
def g(x):
global co
return np.prod( x+np.sin(co*np.pi*x),axis=1)
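# hypothetical spot check, co = 3: g(0.5) = 0.5 + sin(1.5*pi) = -0.5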
np.random.seed(randomSeed)
XX= np.atleast_2d(np.random.uniform(0,1.,size =numObservations)).T
Y = np.atleast_2d(g(XX)).T
X,mean,std = z_score(XX)
trace,map_= sample(numHiddenUnits, X, Y)
data =np.atleast_2d( np.linspace(0., 1., 100)).T
theano.config.compute_test_value = 'off'
d = T.dmatrix()
w= T.dmatrix()
b = T.dvector()  # use float64 to match the other symbolic inputs
bo = T.dscalar()
wo = T.dmatrix()
y= T.dot( T.nnet.sigmoid( T.dot(d,w)+b),wo)+bo
f = theano.function([d,w,b,wo,bo],y)
data1,mean,std = z_score(data, mean, std)
print(trace['wih'].shape)
for s in trace[::1]:
plt.plot(data, f(data1,s['wih'],s['bih'],s['who'],s['bho']),c='blue',alpha =0.15)
plt.plot(data,g(data),'black')
# prediction of maximum a posteriori network
plt.plot(data, f(data1,map_['wih'],map_['bih'],map_['who'],map_['bho']),c='red')
plt.plot(XX,Y,'r.',markersize=10)
plt.show()
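To see whether the chains are mixing at all, the hyperparameter traces are worth a look. A minimal sketch (the varnames argument is what my PyMC3 version accepts; it may differ in other versions):

# near-constant or strongly autocorrelated traces would indicate a stuck sampler
pm.traceplot(trace, varnames=['wihSd', 'bihSd', 'whoSd', 'bhoSd', 'noiseSd'])
plt.show()
pm.summary(trace, varnames=['noiseSd'])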
UPDATE: I changed the code in the following way. First, assigning test_values to the model parameters turned out to be troublesome; but without the test_value assignments, find_MAP does not converge to the right point, so I removed the test_value assignments and instead gave find_MAP a starting point (initpoint). Second, to make everything simpler, I replaced the Gamma hyperpriors with HalfNormals, and the step method with Metropolis. The sample function now looks like this:

def sample(nHiddenUnts, X, Y):
    nFeatures = X.shape[1]
    with pm.Model() as model:
bhoSd = pm.HalfNormal('bhoSd',sd=100**2)
whoSd = pm.HalfNormal('whoSd',sd=100**2)
bihSd = pm.HalfNormal('bihSd',sd=100**2)
wihSd = pm.HalfNormal('wihSd',sd=100**2)
noiseSd = pm.HalfNormal('noiseSd',sd=0.001)
#priors
bho = pm.Normal('bho',mu=0,sd=bhoSd)
who = pm.Normal('who',mu=0,sd=whoSd,shape=(nHiddenUnts,1) )
bih = pm.Normal('bih',mu=0,sd=bihSd ,shape=nHiddenUnts)
wih= pm.Normal('wih',mu=0,sd=wihSd ,shape= (nFeatures,nHiddenUnts))
netOut=T.dot( T.nnet.sigmoid( T.dot( X , wih ) + bih ) , who ) + bho
#likelihood
likelihood = pm.Normal('likelihood',mu=netOut,sd=noiseSd,observed= Y)
#========================================================
initpoint = {'bho':1,
'who':np.random.normal(size=nHiddenUnts,loc=0,scale=1).reshape(nHiddenUnts,1),
'bih':np.random.normal(size=nHiddenUnts,loc=0,scale=1),
'wih':np.random.normal(size=nFeatures*nHiddenUnts,loc=0,scale=1).reshape(nFeatures,nHiddenUnts),
'bhoSd':100,
'bihSd':100,
'whoSd':100,
'wihSd':100,
'noiseSd':0.1
}
start1 = pm.find_MAP(start=initpoint,fmin=scipy.optimize.fmin_l_bfgs_b, vars=[bho,who,bih,wih],model=model)
step = pm.Metropolis(tune=True,tune_interval=10000)
trace = pm.sample(numSamples,step,start=start1,progressbar=True,random_seed=1234)[10000::5]
#========================================================
return trace,start1
The result after drawing 15000 samples looks like this: (figure). The result only becomes like this (figure) when I increase both the standard deviation of the noiseSd hyperprior and the 'noiseSd' entry of initpoint (the starting point for find_MAP) to 0.1. But such a high noise level is undesirable.
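A possible next step is a non-centered parameterization of the weights, a commonly suggested fix when hierarchical scale parameters create a funnel geometry that the sampler cannot traverse: each weight group becomes a standard Normal that is rescaled deterministically by its hyperparameter. A sketch for the hidden-to-output group only, inside sample() (untested; names as above):

with pm.Model() as model:
    whoSd = pm.HalfNormal('whoSd', sd=100**2)
    # unit-scale weights; the hyperparameter enters as a deterministic rescaling
    # rather than through the sd of the Normal, decorrelating scale and weights
    who_raw = pm.Normal('who_raw', mu=0, sd=1, shape=(nHiddenUnts, 1))
    who = pm.Deterministic('who', whoSd * who_raw)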