I am new to Keras tuner and I wanted to ask you please about my implementation.
I want to make sure that I have implemented it correctly. My question is in the first dense layer I have specified the input shape and this layer has no optimization parameter as every parameter has been specified. Is it possible to add this layer in the for loop to be optimized? Also is the way of defining the model is correct?
Thanks for your input, and much appreciated :)
def build_model(hp):
model = keras.Sequential()
# Tune the number of layers.
model.add(Dense(input_dim=train_x.shape[1],
activation='relu',units=64))
model.add(BatchNormalization())
model.add(Dropout(rate=0.2))
for i in range(hp.Int("num_layers", 1,6)):
model.add(
layers.Dense(
# Tune number of units separately.
units=hp.Int(f"units_{i}", min_value=32, max_value=512, step=32),
activation=hp.Choice("activation", ["relu", "tanh"]),
)
)
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(2, activation="softmax"))
learning_rate = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
tuner = kt.RandomSearch(
hypermodel=build_model,
objective="val_accuracy",
max_trials=4,
executions_per_trial=4,
overwrite=True,
directory="/content/drive/MyDrive/SPDL",
project_name="helloworld",
)
tuner.search(train_x, train_y, epochs=5, validation_split=0.33)