
I am trying to use kerastuner.

Here is my code with a reproducible example:

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.models import Model

import kerastuner as kt
from kerastuner.tuners.bayesian import BayesianOptimization

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
    path="boston_housing.npz", test_split=0.2, seed=113
)

params = {}
params['shape'] = x_train[0].shape

def build_model(hp):

    number_of_layers = hp.Choice('number_of_layers', values=[2, 3], default=2)

    if number_of_layers == 2:
        nodes2 = hp.Int('nodes2', 64, 524, 64, default=64)
        nodes3 = hp.Int('nodes3', 32, np.min([nodes2 // 2, 128]), 32, default=32)
        nodes_list = [nodes2, nodes3]

        dropout2 = hp.Float('dropout2', 0., 0.2, 0.05, default=0.)
        dropout3 = hp.Float('dropout3', 0.2, 0.5, 0.05, default=0.5)
        dropouts_list = [dropout2, dropout3]
    else:
        nodes1 = hp.Int('nodes1', 128, 1024, 128, default=128)
        nodes2 = hp.Int('nodes2', 64, np.min([nodes1 // 2, 524]), 64, default=64)
        nodes3 = hp.Int('nodes3', 32, np.min([nodes2 // 2, 128]), 32, default=32)
        nodes_list = [nodes1, nodes2, nodes3]

        dropout1 = hp.Float('dropout1', 0., 0.2, 0.05, default=0.)
        dropout2 = hp.Float('dropout2', 0., 0.2, 0.05, default=0.)
        dropout3 = hp.Float('dropout3', 0.2, 0.5, 0.05, default=0.5)
        dropouts_list = [dropout1, dropout2, dropout3]

    inputs = Input(shape=params['shape'])
    x = inputs
    for i in range(len(nodes_list)):
        x = Dense(nodes_list[i], activation='relu')(x)
        x = Dropout(dropouts_list[i])(x)

    prediction = Dense(1)(x)
    model = Model(inputs, prediction)

    model.compile(
        optimizer=tf.keras.optimizers.Adam(
            hp.Float('learning_rate', 1e-4, 1e-2, sampling='log')),
        loss='mse'
    )

    return model

tuner = BayesianOptimization(
    build_model,
    objective='val_loss',
    max_trials=100)

tuner.search(x_train, y_train, validation_split=0.2,
             callbacks=[tf.keras.callbacks.EarlyStopping(patience=10)])

INFO:tensorflow:Reloading Oracle from existing project .\untitled_project\oracle.json
INFO:tensorflow:Reloading Tuner from .\untitled_project\tuner0.json
---------------------------------------------------------------------------
ZeroDivisionError                         Traceback (most recent call last)
<ipython-input-120-3bfac2133c4d> in <module>
     68     max_trials = 100)
     69 
---> 70 tuner.search(x_train, y_train, validation_split = 0.2, callbacks = [tf.keras.callbacks.EarlyStopping(patience = 10)] )

~\Anaconda3\envs\tf2\lib\site-packages\kerastuner\engine\base_tuner.py in search(self, *fit_args, **fit_kwargs)
    118         self.on_search_begin()
    119         while True:
--> 120             trial = self.oracle.create_trial(self.tuner_id)
    121             if trial.status == trial_module.TrialStatus.STOPPED:
    122                 # Oracle triggered exit.

~\Anaconda3\envs\tf2\lib\site-packages\kerastuner\engine\oracle.py in create_trial(self, tuner_id)
    147             values = None
    148         else:
--> 149             response = self._populate_space(trial_id)
    150             status = response['status']
    151             values = response['values'] if 'values' in response else None

~\Anaconda3\envs\tf2\lib\site-packages\kerastuner\tuners\bayesian.py in _populate_space(self, trial_id)
     99 
    100         # Fit a GPR to the completed trials and return the predicted optimum values.
--> 101         x, y = self._vectorize_trials()
    102         try:
    103             self.gpr.fit(x, y)

~\Anaconda3\envs\tf2\lib\site-packages\kerastuner\tuners\bayesian.py in _vectorize_trials(self)
    204 
    205                 # Embed an HP value into the continuous space [0, 1].
--> 206                 prob = hp_module.value_to_cumulative_prob(trial_value, hp)
    207                 vector.append(prob)
    208 

~\Anaconda3\envs\tf2\lib\site-packages\kerastuner\engine\hyperparameters.py in value_to_cumulative_prob(value, hp)
   1044         sampling = hp.sampling or 'linear'
   1045         if sampling == 'linear':
-> 1046             return (value - hp.min_value) / (hp.max_value - hp.min_value)
   1047         elif sampling == 'log':
   1048             return (math.log(value / hp.min_value) /

ZeroDivisionError: division by zero

3 Answers


A possible cause (hard to say for sure given the low readability of the code pasted above) may be that you are reusing a previously saved project with a different dataset or model. I suggest adding overwrite=True to the BayesianOptimization constructor; let me know if it helps.
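For example (a minimal sketch of that suggestion; everything except overwrite is taken from the question's code):

tuner = BayesianOptimization(
    build_model,
    objective='val_loss',
    max_trials=100,
    overwrite=True)  # start a fresh project instead of reloading .\untitled_project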

Answered 2020-08-25T10:47:40.203

Check the step and default value arguments of your hyperparameters; they should not be zero.
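For example, a valid definition looks like this (hypothetical values; the point is that step is a positive integer and max_value is strictly greater than min_value):

nodes = hp.Int('nodes', min_value=32, max_value=256, step=32, default=32)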

Answered 2020-11-15T07:27:08.900

If Keras Tuner picks nodes2 = 64, then the min_value and max_value of the nodes3 hp will both be 32 (because max_value = 64 / 2 = 32).

If we look at the error message again:

-> 1046             return (value - hp.min_value) / (hp.max_value - hp.min_value)
ZeroDivisionError: division by zero

I think this is where the error comes from: filling in the arguments, the denominator becomes zero, i.e. (value - hp.min_value) / 0.

Semantically, the problem is that Keras Tuner does not recognize that when min_value and max_value are equal there is only one possible value to choose.
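A minimal workaround along these lines (my own sketch, not part of the original answer) is to keep the upper bound of nodes3 strictly above its lower bound so the range can never collapse to a single point:

# If nodes2 == 64, np.min([nodes2 // 2, 128]) == 32, which equals the lower
# bound and triggers the division by zero. Clamping the upper bound to at
# least one step above the minimum keeps the range valid.
nodes3_max = max(int(np.min([nodes2 // 2, 128])), 32 + 32)
nodes3 = hp.Int('nodes3', 32, nodes3_max, 32, default=32)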

Answered 2021-09-14T21:33:47.777