import theano.tensor as T
from keras import backend as K
from keras.regularizers import Regularizer

def kl_divergence(p, p_hat):
    return (p * K.log(p / p_hat)) + ((1 - p) * K.log((1 - p) / (1 - p_hat)))

class SparseActivityRegularizer(Regularizer):
    sparsityBeta = None

    def __init__(self, l1=0., l2=0., p=0.01, sparsityBeta=0.1):
        self.p = p
        self.sparsityBeta = sparsityBeta

    def set_layer(self, layer):
        self.layer = layer

    def __call__(self, loss):
        # p_hat needs to be the average activation of the units in the hidden layer.
        p_hat = T.sum(T.mean(self.layer.get_output(True), axis=0))

        loss += self.sparsityBeta * kl_divergence(self.p, p_hat)
        return loss

    def get_config(self):
        return {"name": self.__class__.__name__,
                "p": self.p}

When I call this custom regularizer in my model, as shown below:

dr = 0.5
inputs = Input(shape=(392,))
x = Dense(1000, activation='relu', activity_regularizer=SparseActivityRegularizer())(inputs)
x = Dropout(dr)(x)
out = Dense(392, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=out)

model.compile(loss=euc_dist_keras,
              optimizer='adadelta', metrics=["accuracy"])
model.summary()

filepath = "weightdae.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                             save_best_only=True, mode='min')
callbacks_list = [checkpoint, TensorBoard(log_dir='/tmp/autoencoder')]
hist = model.fit(ptilde, p,
                 nb_epoch=40,
                 shuffle=True,
                 validation_data=(ptilde_val, p_val),
                 batch_size=32,
                 callbacks=callbacks_list)

I get the following error:

AttributeError: 'SparseActivityRegularizer' object has no attribute 'layer'

Can someone help me fix this error? I checked the implementation of the regularizer, and the activity regularizers in Keras are implemented the same way. But here it somehow cannot find the attribute 'layer' and raises this error.


2 Answers


This style of regularizer declaration is deprecated. As of Keras 1.2.0, you must implement regularization either as a function or as a callable class.
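For illustration, here is a minimal sketch of the function form; the function name and the constants `p` and `beta` are placeholders, not from the original answer. An activity regularizer is simply a callable that maps the layer's output tensor to a scalar penalty, so no set_layer() call is involved:

from keras import backend as K

def sparse_activity_regularizer(activation):
    # p: target average activation, beta: penalty weight (illustrative values)
    p, beta = 0.01, 0.1
    # average activation of each unit over the batch
    p_hat = K.mean(activation, axis=0)
    # clamp to avoid division by zero
    p_hat = K.maximum(p_hat, 1e-10)
    kl = p * K.log(p / p_hat) + (1 - p) * K.log((1 - p) / (1 - p_hat))
    return beta * K.sum(kl)

# passed directly when building the layer:
# Dense(1000, activation='relu', activity_regularizer=sparse_activity_regularizer)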

Answered 2018-08-23T08:27:05.313

Try this:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K

class SparseRegularizer(keras.regularizers.Regularizer):

    def __init__(self, rho=0.01, beta=1):
        """
        rho  : Desired average activation of the hidden units
        beta : Weight of the sparsity penalty term
        """
        self.rho = rho
        self.beta = beta

    def __call__(self, activation):
        rho = self.rho
        beta = self.beta
        # sigmoid because we need probability distributions
        activation = tf.nn.sigmoid(activation)
        # average over the batch samples
        rho_bar = K.mean(activation, axis=0)
        # avoid division by 0
        rho_bar = K.maximum(rho_bar, 1e-10)
        KLs = rho * K.log(rho / rho_bar) + (1 - rho) * K.log((1 - rho) / (1 - rho_bar))
        return beta * K.sum(KLs)  # sum over the layer units

    def get_config(self):
        return {
            'rho': self.rho,
            'beta': self.beta
        }
Answered 2020-11-17T08:57:04.230