I am very new to GPy and gpflow and have been learning to do multi-output, multi-input regression from the standard examples on their pages. As a test case I use the Goldstein function, with the other input dimension held at constant values so that each output reduces to a 1-D function. My problem is that I am not sure how to obtain RMSE and MAE error metrics. Is there any built-in method that reports these, and if not, how do I obtain ystar (the predicted function values at unseen locations) so that I can compute the RMSE/MAE/R2 errors myself? My attempt at this is at the very end, after the training code.

Can anyone give me any pointers on this?

import gpflow
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pylab as pb
import GPy
from gpflow.ci_utils import ci_niter
from pykrige import OrdinaryKriging
from math import pi as pi
import pyKriging  
from pyKriging.krige import kriging  
from pyKriging.samplingplan import samplingplan
import pyDOE as pyd

plt.rcParams.update(plt.rcParamsDefault)
np.random.seed(123)
np.set_printoptions(suppress=True)

#%%
bounds1 = np.array([[-1,1]]) 

X = pyd.lhs(3, 60, criterion='centermaximin') # 3 cols of 60 samples
X= X*(np.max(bounds1)-np.min(bounds1))+np.min(bounds1)

#training set (note: x1..x3 below are extracted but not used further)
x1_train = X[:,0]
x2_train = X[:,1]
x3_train = X[:,2]

#testing set (likewise unused; the model's train/test sets are built from Xa/Xb below)
n_testing_samples = 40
x1_test = np.linspace(0,1, n_testing_samples)
x2_test = np.linspace(0,1, n_testing_samples)
x3_test = np.linspace(0,1, n_testing_samples)

n=100
x2 = np.linspace(0,1,n)
x3 = np.linspace(0,1,n)

bounds_goldstein = np.array([-2,2])
x4 = np.linspace(-2,2,n)


gaussian_noise = np.random.randn(n, 1) * 0.25 # iid N(0, 0.25^2) noise values
noise60 = np.random.choice(gaussian_noise[:,0], 60)[:,None]
noise40 = np.random.choice(gaussian_noise[:,0], 40)[:,None]
    
#%% Goldstein lambdas, slices at x1 = -2, 0, 2 (note the parentheses around -2:
# in Python 3*-2**2 evaluates to -12, not 3*(-2)**2 = 12)
f_output1 = lambda x :(1+(-2+x+1)**2 * (19-14*(-2)+3*(-2)**2-14*x + 6*(-2)*x + 3*x**2))* (30+(2*(-2)-3*x)**2 * (18-32*(-2)+12*(-2)**2 + 48*x - 36*(-2)*x + 27*x**2))
f_output2 = lambda x :(1+(0+x+1)**2 * (19-14*0+3*0**2-14*x + 6*0*x + 3*x**2))* (30+(2*0-3*x)**2 * (18-32*0+12*0**2 + 48*x - 36*0*x + 27*x**2))
f_output3 = lambda x :(1+(2+x+1)**2 * (19-14*2+3*2**2-14*x + 6*2*x + 3*x**2))* (30+(2*2-3*x)**2 * (18-32*2+12*2**2 + 48*x - 36*2*x + 27*x**2))
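For reference, the three lambdas above are 1-D slices of the 2-D Goldstein-Price function at x1 = -2, 0 and 2; a general helper (the name goldstein_price is my own) that they could equally be defined from:

def goldstein_price(x1, x2):
    # standard 2-D Goldstein-Price function; f_output1/2/3 above are its
    # slices at x1 = -2, 0 and 2 respectively
    term1 = 1 + (x1 + x2 + 1)**2 * (19 - 14*x1 + 3*x1**2 - 14*x2 + 6*x1*x2 + 3*x2**2)
    term2 = 30 + (2*x1 - 3*x2)**2 * (18 - 32*x1 + 12*x1**2 + 48*x2 - 36*x1*x2 + 27*x2**2)
    return term1 * term2

# e.g. f_output2(x) is goldstein_price(0, x)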

#%% plot train and test

def plot_3outputs(m, xlim, ylim):
    # Plots the ICM fit for each of the three outputs together with the noisy
    # test points. Input dimension 1 is the output index, so
    # fixed_inputs=[(1,k)] selects output k; the stacked training rows for
    # output k are rows 60*k to 60*(k+1).
    fig = pb.figure(figsize=(12,10))
    #Output 1
    ax2 = fig.add_subplot(311)
    ax2.set_xlim(xlim)
    ax2.set_title('Output 1')
    m.plot(plot_limits=xlim,fixed_inputs=[(1,0)],which_data_rows=slice(0,60),ax=ax2, label='icm')
    ax2.plot(x4_test1[:,:1],f_output1(x4_test1[:,:1])+noise40,'rx',mew=1.5, label='test x1=-2')
    plt.legend(loc='best')
    plt.tight_layout()
    #Output 2
    ax3 = fig.add_subplot(312)
    ax3.set_xlim(xlim)
    ax3.set_title('Output 2')
    m.plot(plot_limits=xlim,fixed_inputs=[(1,1)],which_data_rows=slice(60,120),ax=ax3, label='icm')
    ax3.plot(x4_test2[:,:1],f_output2(x4_test2[:,:1])+noise40,'rx',mew=1.5, label='test x1=0')
    plt.legend(loc='best')
    plt.tight_layout()
    #Output 3
    ax4 = fig.add_subplot(313)
    ax4.set_xlim(xlim)
    ax4.set_title('Output 3')
    m.plot(plot_limits=xlim,fixed_inputs=[(1,2)],which_data_rows=slice(120,180),ax=ax4, label='icm')
    ax4.plot(x4_test3[:,:1],f_output3(x4_test3[:,:1])+noise40,'rx',mew=1.5, label='test x1=2')
    plt.legend(loc='best')
    plt.tight_layout()

#%%
bounds = np.array([[-2,2]]) 

Xa = pyd.lhs(3, 60, criterion='centermaximin') # 3 cols of 60 samples
Xa= Xa*(np.max(bounds)-np.min(bounds))+np.min(bounds)

x4_train1 = Xa[:,0]
x4_train1 = np.reshape(x4_train1, (len(x4_train1),1))

x4_train2 = Xa[:,1]
x4_train2 = np.reshape(x4_train2, (len(x4_train2),1))

x4_train3 = Xa[:,2]
x4_train3 = np.reshape(x4_train3, (len(x4_train3),1))

yg1 = f_output1(x4_train1)
yg2 = f_output2(x4_train2)
yg3 = f_output3(x4_train3)

Xb = pyd.lhs(3,40, criterion ='centermaximin')
Xb = Xb*(np.max(bounds)-np.min(bounds))+np.min(bounds)

x4_test1 = Xb[:,0]
x4_test1 = np.reshape(x4_test1, (len(x4_test1),1))
x4_test2 = Xb[:,1]
x4_test2 = np.reshape(x4_test2, (len(x4_test2),1))
x4_test3 = Xb[:,2]
x4_test3 = np.reshape(x4_test3, (len(x4_test3),1))

ygt1 = f_output1(x4_test1)
ygt2 = f_output2(x4_test2)
ygt3 = f_output3(x4_test3)

fig = pb.figure(figsize=(12,8))
ax1 = fig.add_subplot(111)
ax1.plot(x4_train1[:,:1],f_output1(x4_train1[:,:1])+noise60,'kx',mew=1.5,label='Train set x1=-2')
ax1.plot(x4_train2[:,:1],f_output2(x4_train2[:,:1])+noise60,'kx',mew=1.5,label='Train set x1=0')
ax1.plot(x4_train3[:,:1],f_output3(x4_train3[:,:1])+noise60,'kx',mew=1.5,label='Train set x1=2')
ax1.plot(x4,f_output1(x4),mew=1.5,label='True fx x1=-2')
ax1.plot(x4,f_output2(x4),mew=1.5,label='True fx x1=0')
ax1.plot(x4,f_output3(x4),mew=1.5,label='True fx x1=2')
ax1.plot(x4_test1[:,:1],f_output1(x4_test1[:,:1])+noise40,'+',mew=3,label='test set x1=-2')
ax1.plot(x4_test2[:,:1],f_output2(x4_test2[:,:1])+noise40,'+',mew=3,label='test set x1=0')
ax1.plot(x4_test3[:,:1],f_output3(x4_test3[:,:1])+noise40,'+',mew=3,label='test set x1=2')

plt.legend(loc='best')
plt.show()


y_true = [f_output1(x4), f_output2(x4), f_output3(x4)] # noise-free curves on the dense grid
K = GPy.kern.RBF(1)

# coregionalization kernel: B = W W^T + diag(kappa) couples the three outputs
B = GPy.kern.Coregionalize(input_dim=1,output_dim=3)
multkernel = K.prod(B,name='B.K')
print(multkernel)
print('W matrix\n',B.W)
print('\nkappa vector\n',B.kappa)
print('\nB matrix\n',B.B)

# ICM = one RBF kernel shared across outputs, mixed by the coregionalization matrix
icm = GPy.util.multioutput.ICM(input_dim=1,num_outputs=3,kernel=K)
print('icm \n',icm)
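As I understand it from the GPy docs, the coregionalization matrix printed above is built as B = W W^T + diag(kappa); a quick numpy sanity check of that on the standalone B kernel (my own addition, not part of the model):

# check that B.B really is W W^T + diag(kappa)
W = B.W.values
print(np.allclose(B.B, W.dot(W.T) + np.diag(B.kappa.values)))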

a = [x4_train1, x4_train2, x4_train3]
b = [f_output1(x4_train1)+noise60, f_output2(x4_train2)+noise60, f_output3(x4_train3)+noise60]
m = GPy.models.GPCoregionalizedRegression(a, b, kernel=icm) # stacks the outputs, appending an output-index column to X
m['.*rbf.var'].constrain_fixed(1.) # fix the RBF variance; the output scales are carried by B
m.optimize()
print(m)
plot_3outputs(m, xlim=(-2,2), ylim=(-3e5,8e5))
plt.show()
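And this is my attempt at getting ystar for the error metrics, pieced together from the GPy coregionalized-regression notebook: new inputs need the output index (0, 1 or 2) appended as an extra column, and the same index passed again through Y_metadata so the model uses the right per-output noise term. The metrics themselves are plain numpy (the names Xnew, ystar, y_test are my own); I am just not sure this is the intended way:

# test inputs for all three outputs, with the output index as a second column
Xnew = np.vstack([
    np.hstack([x4_test1, 0*np.ones_like(x4_test1)]),
    np.hstack([x4_test2, 1*np.ones_like(x4_test2)]),
    np.hstack([x4_test3, 2*np.ones_like(x4_test3)]),
])
# the output index must also be passed as Y_metadata for the mixed-noise likelihood
noise_dict = {'output_index': Xnew[:,1:].astype(int)}

ystar, vstar = m.predict(Xnew, Y_metadata=noise_dict) # predictive mean and variance

# noise-free test targets, stacked in the same order as Xnew
y_test = np.vstack([ygt1, ygt2, ygt3])

rmse = np.sqrt(np.mean((ystar - y_test)**2))
mae = np.mean(np.abs(ystar - y_test))
r2 = 1 - np.sum((y_test - ystar)**2)/np.sum((y_test - y_test.mean())**2)
print('RMSE:', rmse, 'MAE:', mae, 'R2:', r2)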
