
I want to run a Bayesian search with [skopt](https://scikit-optimize.github.io/stable/auto_examples/bayesian-optimization.html). My dataset is a time series, and t is my time step.
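For context, the basic skopt pattern I am following looks roughly like this (a toy objective with illustrative names, not my real model):

from skopt import gp_minimize
from skopt.space import Integer, Categorical
from skopt.utils import use_named_args

# Toy search space and objective, just to show the gp_minimize / use_named_args pattern.
space = [Integer(low=0, high=5, name='n_layers'),
         Categorical([16, 32, 64], name='n_units')]

@use_named_args(dimensions=space)
def objective(n_layers, n_units):
    # gp_minimize minimizes, so return the value to be minimized
    return (n_layers - 2) ** 2 + abs(n_units - 32) / 32.0

result = gp_minimize(objective, dimensions=space, n_calls=15, random_state=0)
print(result.x, result.fun)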

But I get an error: RecursionError: maximum recursion depth exceeded in comparison

Here is my code:

def Grid_search_class(X_train=X_train[:, 0:t+1, :],
                      y_train=y_train,
                      X_test=X_test[:, 0:t+1, :],
                      y_test=y_test,
                      n_calls=20,
                      print_score=False, t=t):
    """INPUTS: train/test data; n_calls = number of calls to func."""
    import tensorflow as tf
    Adam = tf.keras.optimizers.Adam(learning_rate=0.007)
    Adagrad = tf.keras.optimizers.Adagrad(learning_rate=0.007)
    dim_num_input_text = Categorical([16,32,64,128,256,512,1024,2048], name='num_dense_layers_text')
    dim_num_dense_text = Integer(low=0, high=5, name='num_HLD_nodes_text')
    dim_drop_text = Categorical([0.01,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4], name='drop_text')
    dim_num_input_temp = Categorical([16,32,64,128,256,512,1024,2048], name='num_dense_layers_temp')
    dim_num_dense_temp = Integer(low=0, high=5, name='num_HLD_nodes_temp')
    dim_drop_temp = Categorical([0.01,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4], name='drop_temp')
    dim_num_input_fixe = Categorical([16,32,64,128,256,512,1024,2048], name='num_dense_layers_fixe')
    dim_num_dense_fixe = Integer(low=0, high=5, name='num_HLD_nodes_fixe')
    dim_drop_fixe = Categorical([0.01,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4], name='drop_fixe')
    dim_num_input_merge = Categorical([16,32,64,128,256,512,1024,2048], name='num_dense_layers_merge')
    dim_num_dense_merge = Integer(low=0, high=5, name='num_HLD_nodes_merge')
    dim_drop_merge = Categorical([0.01,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4], name='drop_merge')
    dim_optim=Categorical([Adam,Adagrad], name='optim')

    
    dimensions = [dim_num_input_text,
                  dim_num_dense_text,
                  dim_drop_text,
                  dim_num_input_temp,
                  dim_num_dense_temp,
                  dim_drop_temp,
                  dim_num_input_fixe,
                  dim_num_dense_fixe,
                  dim_drop_fixe,
                  dim_num_input_merge,
                  dim_num_dense_merge,
                  dim_drop_merge,
                  dim_optim
                 ]
    default_parameters = [512,0,0.1,512,0,0.1,512,0,0.1,512,0,0.1,Adam]

   

    def create_model(num_dense_layers_text,num_HLD_nodes_text,drop_text,
             num_dense_layers_temp,num_HLD_nodes_temp,drop_temp,
             num_dense_layers_fixe,num_HLD_nodes_fixe,drop_fixe,
             num_dense_layers_merge,num_HLD_nodes_merge,drop_merge,optim,t=t):
        x_text = model_text.layers[ind_list[-1]-1].output
        if num_dense_layers_text>0:
            for i in range(num_dense_layers_text):
                x_text =tf.keras.layers.Dense(num_HLD_nodes_text,activation='relu')(x_text)
                x_text=tf.keras.layers.Dropout(drop_text)(x_text)

        x_temp = model_temp[t].layers[ind_list[t]].output
        if num_dense_layers_temp>0:
            for i in range(num_dense_layers_temp):
                x_temp =tf.keras.layers.Dense(num_HLD_nodes_temp,activation='relu')(x_temp)
                x_temp=tf.keras.layers.Dropout(drop_temp)(x_temp)

        x_fixe= model_fixe.layers[1].output
        if num_dense_layers_fixe>0:
            for i in range(num_dense_layers_fixe):
                x_fixe =tf.keras.layers.Dense(num_HLD_nodes_fixe,activation='relu')(x_fixe)
                x_fixe=tf.keras.layers.Dropout(drop_fixe)(x_fixe)

        merge = tf.keras.layers.concatenate([x_text,x_temp,x_fixe])

        if num_dense_layers_merge>0:
            for i in range(num_dense_layers_merge):
                merge =tf.keras.layers.Dense(num_HLD_nodes_merge,activation='relu')(merge)
                merge=tf.keras.layers.Dropout(drop_merge)(merge)
        # add the classification layer
        predictions = tf.keras.layers.Dense(3, activation='softmax')(merge)

        model = tf.keras.Model(inputs = [model_text.input,model_temp[t].input,model_fixe.input], outputs = predictions)

        #setup our optimizer and compile

        model.compile(optimizer=optim, loss=ncce,
                      metrics=[tf.keras.metrics.Precision(name='precision'),
                               tf.keras.metrics.Recall(name='recall'),
                               F1Score(num_classes=3, name='F1', average='macro')])
        return model 

    score='val_F1'
    @use_named_args(dimensions=dimensions)
    def fitness(num_dense_layers_text,num_HLD_nodes_text,drop_text,
                 num_dense_layers_temp,num_HLD_nodes_temp,drop_temp,
                 num_dense_layers_fixe,num_HLD_nodes_fixe,drop_fixe,
                 num_dense_layers_merge,num_HLD_nodes_merge,drop_merge,optim):
        print(num_dense_layers_text,num_HLD_nodes_text,drop_text,
                 num_dense_layers_temp,num_HLD_nodes_temp,drop_temp,
                 num_dense_layers_fixe,num_HLD_nodes_fixe,drop_fixe,
                 num_dense_layers_merge,num_HLD_nodes_merge,drop_merge,optim)
   
        model = create_model(num_dense_layers_text=num_dense_layers_text,
                             num_HLD_nodes_text=num_HLD_nodes_text, drop_text=drop_text,
                             num_dense_layers_temp=num_dense_layers_temp,
                             num_HLD_nodes_temp=num_HLD_nodes_temp, drop_temp=drop_temp,
                             num_dense_layers_fixe=num_dense_layers_fixe,
                             num_HLD_nodes_fixe=num_HLD_nodes_fixe, drop_fixe=drop_fixe,
                             num_dense_layers_merge=num_dense_layers_merge,
                             num_HLD_nodes_merge=num_HLD_nodes_merge, drop_merge=drop_merge,
                             optim=optim, t=t)

        callback = tf.keras.callbacks.EarlyStopping(
            monitor=score, min_delta=0.01, patience=1, verbose=0, mode='auto',
            baseline=0, restore_best_weights=False)
        # named "blackbox" because it represents the structure of the model
        blackbox = model.fit(x=X_train,
                             y=y_train,
                             verbose=1,
                             epochs=2,
                             batch_size=32,
                             callbacks=[callback],
                             validation_data=(X_test, y_test))
        # take the validation score from the last epoch
        val_loss = blackbox.history[score][-1]
        # gp_minimize minimizes, so flip the sign of scores we want to maximize
        if score == 'val_F1':
            val_loss = -val_loss

        # Print the validation score.
        if print_score:
            print()
            print("val_score: {}".format(val_loss))
            print()


        # Delete the Keras model with these hyper-parameters from memory.
        del model

        # Clear the Keras session, otherwise it will keep adding new
        # models to the same TensorFlow graph each time we create
        # a model with a different set of hyper-parameters.
        tf.keras.backend.clear_session()
        tf.compat.v1.reset_default_graph()

        # the optimizer aims for the lowest value, so we return the (already negated) score
        return val_loss


    gp_result = gp_minimize(fitness,
                            dimensions=dimensions,
                            n_calls=n_calls,
                            n_random_starts=7,
                            noise=0.01,
                            x0=default_parameters)
    a = pd.concat([pd.DataFrame(gp_result.x_iters,
                                columns=["dense layers text", "HLD nodes text", "drop text",
                                         "dense layers temp", "HLD nodes temp", "drop temp",
                                         "dense layers fixe", "HLD nodes fixe", "drop fixe",
                                         "dense layers merge", "HLD nodes merge", "drop merge",
                                         "optim"]),
                   pd.Series(gp_result.func_vals * -1, name="val_loss")], axis=1)
    a.sort_values(by=['val_loss'], inplace=True, ascending=False)
    print(a.iloc[:10])

    return a

This step searches for the best parameters at a given time step t.

def Run_Grid_search_temp(j=0, n_calls=25):
    while j < X_train.shape[1]:
        temp = Grid_search_class(t=j, n_calls=n_calls)
        print(temp)
        j += 1
    return

And this step loops the search over every time step (a variant that collects the results is sketched below).
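For completeness, a small variant of that loop (my own sketch, not part of the original code) that keeps each step's result table instead of only printing it:

def Run_Grid_search_temp_collect(j=0, n_calls=25):
    # Same loop as above, but the per-time-step result DataFrames are
    # collected in a dict keyed by the time step t.
    results = {}
    while j < X_train.shape[1]:
        results[j] = Grid_search_class(t=j, n_calls=n_calls)
        j += 1
    return results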


1 Answer

import sys
sys.setrecursionlimit(10000)

seems to have solved my problem.
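Where to put that call: a small sketch of my own (the function names are the ones defined above); the limit only needs to be raised once, before the search starts.

import sys

# Raise Python's recursion limit (default 1000) before the Bayesian search,
# since the RecursionError is raised from a comparison deep inside the search.
sys.setrecursionlimit(10000)

# then run the search over every time step as before
Run_Grid_search_temp(j=0, n_calls=25)

If the deep recursion is caused by the Keras optimizer instances inside the Categorical dimension, another option I have not tested here would be to put the strings 'adam' and 'adagrad' in the search space and build the actual optimizer inside create_model.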

Answered on 2020-06-22T11:08:03.400