If you are using gp_minimize, you can include the number of hidden layers and the number of neurons per layer as parameters in the search Space. Inside the definition of the objective function you can then build the hyperparameter hidden_layer_sizes from those values by hand.

Here is the example from the scikit-optimize homepage, now using an MLPRegressor:
import numpy as np
from sklearn.datasets import load_boston  # removed in scikit-learn 1.2; use fetch_california_housing on newer versions
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import cross_val_score
from skopt.space import Integer, Categorical
from skopt.utils import use_named_args
from skopt import gp_minimize

boston = load_boston()
X, y = boston.data, boston.target

reg = MLPRegressor(random_state=0)

space = [
    Categorical(['tanh', 'relu'], name='activation'),
    Integer(1, 4, name='n_hidden_layer'),
    Integer(200, 2000, name='n_neurons_per_layer'),
]

@use_named_args(space)
def objective(**params):
    n_neurons = params['n_neurons_per_layer']
    n_layers = params['n_hidden_layer']
    # create the hidden layers as a tuple of length n_layers with n_neurons per layer
    params['hidden_layer_sizes'] = (n_neurons,) * n_layers
    # remove the helper parameters, which MLPRegressor would reject as unknown
    params.pop('n_neurons_per_layer')
    params.pop('n_hidden_layer')
    reg.set_params(**params)
    return -np.mean(cross_val_score(reg, X, y, cv=5, n_jobs=-1,
                                    scoring="neg_mean_absolute_error"))

res_gp = gp_minimize(objective, space, n_calls=50, random_state=0)
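After the optimization finishes, res_gp.x holds the best parameter values in the same order as space, and res_gp.fun is the lowest mean absolute error found. A minimal sketch of reading them back out and refitting (the variable names best_activation, best_n_layers, best_n_neurons, and best_reg are just for illustration):

# res_gp.x lists the best values in the order of `space`:
# [activation, n_hidden_layer, n_neurons_per_layer]
best_activation, best_n_layers, best_n_neurons = res_gp.x
print("best MAE: %.3f" % res_gp.fun)
print("activation=%s, hidden_layer_sizes=%s"
      % (best_activation, (best_n_neurons,) * best_n_layers))

# refit a final model with the tuned configuration
best_reg = MLPRegressor(
    activation=best_activation,
    hidden_layer_sizes=(best_n_neurons,) * best_n_layers,
    random_state=0,
).fit(X, y)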