
I know about the Keras learning rate schedulers and tf.keras.optimizers.schedules.InverseTimeDecay, but they take only the current epoch or only the current step as a parameter. I would like my learning rate to stay at its initial value until, say, the tenth epoch, and only then start applying the inverse time decay schedule. Is there a way to take both the epoch and the current step as parameters? I adapted the approach below from TensorFlow's source code to simply increment step and epoch counters and passed it as a callback, but it does not seem to change the learning rate:

import tensorflow as tf

class CustomLearningRateScheduler(tf.keras.callbacks.Callback):

    def __init__(self, initialLearnRate, decay):
        super(CustomLearningRateScheduler, self).__init__()
        self.initialLearnRate = initialLearnRate
        self.totalBatches = 0
        self.decay = decay

    #def on_epoch_begin(self, epoch, logs=None):
    def on_train_batch_end(self, batch, logs=None):
        if not hasattr(self.model.optimizer, "lr"):
            raise ValueError('Optimizer must have a "lr" attribute.')

        # Get the current learning rate from the model's optimizer.
        lr = float(tf.keras.backend.get_value(self.model.optimizer.learning_rate))
        # Compute the scheduled rate: inverse time decay over the batch counter.
        scheduled_lr = self.initialLearnRate / (self.decay * self.totalBatches + 1)
        self.totalBatches = self.totalBatches + 1
        # Write the new value back to the optimizer for the next batch.
        tf.keras.backend.set_value(self.model.optimizer.lr, scheduled_lr)
        #print("\nBatch %05d: Learning rate is %6.4f." % (batch, scheduled_lr))

lrSchedule = CustomLearningRateScheduler(initialLearnRate, decay_rate)
# start of training for 30 epochs
modified_vgg.fit(x=train_batches_seq,
    steps_per_epoch=len(train_batches_seq),
    validation_data=valid_batches_seq,
    validation_steps=len(valid_batches_seq),
    epochs=epochNumber,
    callbacks=[lrSchedule],
    verbose=1
)
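
To make the intended behaviour concrete, here is a minimal sketch of the kind of callback I am after: it holds the rate for the first ten epochs and then decays it by a global batch count. The class name and the hold_epochs parameter are my own for illustration, not part of the Keras API:

class HoldThenInverseTimeDecay(tf.keras.callbacks.Callback):
    """Keep the initial rate for the first `hold_epochs` epochs,
    then apply inverse time decay driven by a global batch counter."""

    def __init__(self, initialLearnRate, decay, hold_epochs=10):
        super(HoldThenInverseTimeDecay, self).__init__()
        self.initialLearnRate = initialLearnRate
        self.decay = decay
        self.hold_epochs = hold_epochs
        self.current_epoch = 0
        self.decay_steps = 0  # batches seen after the hold period ends

    def on_epoch_begin(self, epoch, logs=None):
        # Keras passes the 0-based epoch index, so the batch hook
        # below can see both the epoch and the step counter.
        self.current_epoch = epoch

    def on_train_batch_begin(self, batch, logs=None):
        if self.current_epoch < self.hold_epochs:
            scheduled_lr = self.initialLearnRate  # hold phase: constant rate
        else:
            scheduled_lr = self.initialLearnRate / (1 + self.decay * self.decay_steps)
            self.decay_steps += 1
        # Apply before the batch runs so the update uses the new rate.
        tf.keras.backend.set_value(self.model.optimizer.learning_rate, scheduled_lr)

Setting the rate in on_train_batch_begin (rather than on_train_batch_end) ensures each batch trains with the value just computed, which also makes it easier to verify that the schedule is actually taking effect.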