
I have this custom callback to log the reward in my custom vectorized environment, but the reward always shows up in the console as [0] and never gets logged to TensorBoard:

from stable_baselines3.common.callbacks import BaseCallback

class TensorboardCallback(BaseCallback):
    """
    Custom callback for plotting additional values in tensorboard.
    """

    def __init__(self, verbose=0):
        super(TensorboardCallback, self).__init__(verbose)

    def _on_step(self) -> bool:                
        self.logger.record('reward', self.training_env.get_attr('total_reward'))
        return True

This is the relevant part of the main function:

model = PPO(
    "MlpPolicy", env,
    learning_rate=3e-4,
    policy_kwargs=policy_kwargs,
    verbose=1,
    tensorboard_log="./tensorboard/")

# as the environment is not serializable, we need to set a new instance of the environment
loaded_model = model = PPO.load("model", env=env)
loaded_model.set_env(env)

# and continue training
loaded_model.learn(1e+6, callback=TensorboardCallback())

1 Answer


You need to add [0] as an index: get_attr returns a list with one value per sub-environment of the vectorized environment, while logger.record expects a scalar.

So where you wrote self.logger.record('reward', self.training_env.get_attr('total_reward')), you just need to use self.logger.record('reward', self.training_env.get_attr('total_reward')[0]) instead:

class TensorboardCallback(BaseCallback):
    """
    Custom callback for plotting additional values in tensorboard.
    """

    def __init__(self, verbose=0):
        super(TensorboardCallback, self).__init__(verbose)

    def _on_step(self) -> bool:
        # get_attr returns a list (one value per sub-environment),
        # so index [0] to log a scalar
        self.logger.record('reward', self.training_env.get_attr('total_reward')[0])

        return True
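
For context, here is a minimal, self-contained sketch of the fix wired into a training run, assuming a 2021-era stable-baselines3 with the classic gym API. The ToyEnv class, its total_reward attribute, and the ./tensorboard/ path are placeholders invented for illustration; the points that matter are the [0] indexing and setting tensorboard_log on the model, since values passed to logger.record only reach TensorBoard if tensorboard_log is set, and are written out when the logger dumps its accumulated records (every log_interval iterations):

import gym
import numpy as np
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.vec_env import DummyVecEnv


class ToyEnv(gym.Env):
    """Placeholder env that tracks a total_reward attribute."""

    def __init__(self):
        self.observation_space = gym.spaces.Box(-1.0, 1.0, shape=(2,), dtype=np.float32)
        self.action_space = gym.spaces.Discrete(2)
        self.total_reward = 0.0
        self._steps = 0

    def reset(self):
        self.total_reward = 0.0
        self._steps = 0
        return self.observation_space.sample()

    def step(self, action):
        reward = float(action)
        self.total_reward += reward
        self._steps += 1
        done = self._steps >= 100
        return self.observation_space.sample(), reward, done, {}


class TensorboardCallback(BaseCallback):
    def _on_step(self) -> bool:
        # index [0] picks the first sub-environment's value out of the list
        self.logger.record('reward', self.training_env.get_attr('total_reward')[0])
        return True


env = DummyVecEnv([ToyEnv])
model = PPO("MlpPolicy", env, verbose=1, tensorboard_log="./tensorboard/")
model.learn(total_timesteps=10_000, callback=TensorboardCallback())

With more than one sub-environment you would see a longer list from get_attr; logging a mean over it instead of just the first entry is another option.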
Answered 2021-12-25T01:10:38.623