def kl_divergence(p, p_hat):
return (p * K.log(p / p_hat)) + ((1 - p) * K.log((1 - p) / (1 - p_hat)))
class SparseActivityRegularizer(Regularizer):
sparsityBeta = None
def __init__(self, l1=0., l2=0., p=0.01, sparsityBeta=0.1):
self.p = p
self.sparsityBeta = sparsityBeta
def set_layer(self, layer):
self.layer = layer
def __call__(self,loss):
#p_hat needs to be the average activation of the units in the hidden layer.
p_hat = T.sum(T.mean(self.layer.get_output(True) , axis=0))
loss += self.sparsityBeta * kl_divergence(self.p, p_hat)
return loss
def get_config(self):
return {"name": self.__class__.__name__,
"p": self.l1}
当我在模型中调用这个自定义正则化器时,如下所示
dr=0.5
inputs = Input(shape=(392,))
x = Dense(1000,activation='relu',activity_regularizer=SparseActivityRegularizer())(inputs)
x=Dropout(dr)(x)
out= Dense(392, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=out)
model.compile(loss=euc_dist_keras,
optimizer='adadelta', metrics=["accuracy"])
model.summary()
filepath="weightdae.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
save_best_only=True, mode='min')
callbacks_list = [checkpoint,TensorBoard(log_dir='/tmp/autoencoder')]
hist = model.fit(ptilde, p,
nb_epoch=40,
shuffle=True,
validation_data=(ptilde_val,p_val),
batch_size=32,
callbacks=callbacks_list)
我收到以下错误
AttributeError: 'SparseActivityRegularizer' object has no attribute 'layer'
有人可以帮我解决这个错误吗?我检查了正则化器的实现,keras 中的活动正则化器也以相同的方式实现。但是在这里它以某种方式找不到属性“层”并引发此错误。