2

我最近一直在研究一种自平衡的双腿,它应该试图防止角色摔倒。每次 AgentReset,腿都会重置其所有必要的因素,例如 pos、rot 和 vel,并且角色下方的地板会随机改变旋转小于 5 度。尽管如此,无论我如何处理代理接受的观察次数,他似乎仍然没有真正从他的错误中吸取教训。现在,我是机器学习的新手,所以放轻松!我错过了什么?谢谢!

一些注意事项:我不太确定 RayPerceptionSensorComponent3D 是如何工作的。如果这可能有帮助,也许有人可以让我朝着正确的方向前进。

训练中的代理、检查器(Inspector)截图以及 TensorBoard 数据见下方图片。




代理脚本:

using MLAgents;
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using MLAgents.Sensor;
using Random = UnityEngine.Random;

public class BalanceAgent : Agent
{
    private BalancingArea area;
    public GameObject floor;
    public GameObject waist;
    public GameObject buttR;
    public GameObject buttL;
    public GameObject thighR;
    public GameObject thighL;
    public GameObject legR;
    public GameObject legL;
    public GameObject footR;
    public GameObject footL;

    // Index 0 = waist, 1 = buttR, 2 = buttL, 3 = thighR, 4 = thighL,
    // 5 = legR, 6 = legL, 7 = footR, 8 = footL (filled in Start()).
    public GameObject[] bodyParts = new GameObject[9];
    public HingeJoint[] hingeParts = new HingeJoint[9];
    public JointLimits[] jntLimParts = new JointLimits[9];

    // Starting pose captured in Start(), restored on every AgentReset().
    public Vector3[] posStart = new Vector3[9];
    public Vector3[] eulerStart = new Vector3[9];

    public RayPerceptionSensorComponent3D raySensors;

    // Latest downward-raycast result from the waist, updated in AgentAction().
    float rayDist = 0;
    float rayAngle = 0;
    Vector3 rayFloorAngle = new Vector3(0, 0, 0);
    Vector3 rayPoint = new Vector3(0, 0, 0);

    int rotAgent = 0;

    // Caches each body part's starting pose and its hinge joint/limits.
    public void Start() {
        bodyParts = new GameObject[] { waist, buttR, buttL, thighR, thighL, legR, legL, footR, footL };             //Waist = 0, footL = 8.

        for (int i = 0; i < bodyParts.Length; i++) {
            posStart[i] = bodyParts[i].transform.position;
            eulerStart[i] = bodyParts[i].transform.eulerAngles;
            HingeJoint hinge = bodyParts[i].GetComponent<HingeJoint>();
            if (hinge != null) {
                hingeParts[i] = hinge;
                jntLimParts[i] = hinge.limits;
            }
        }
    }

    public override void InitializeAgent() {
        base.InitializeAgent();
        area = GetComponentInParent<BalancingArea>();
    }

    // Restores the starting pose, zeroes velocities, resets joint limits and
    // randomizes the floor tilt. NOTE: Random.Range(int, int) excludes the
    // upper bound, so the tilt is in [-5, 4] whole degrees per axis.
    public override void AgentReset() {
        floor.transform.eulerAngles = new Vector3(Random.Range(-5, 5), 0, Random.Range(-5, 5));
        print("Reset! - " + rotAgent);
        for (int i = 0; i < bodyParts.Length; i++) {
            bodyParts[i].transform.position = posStart[i];
            bodyParts[i].transform.eulerAngles = eulerStart[i];
            if (bodyParts[i].GetComponent<HingeJoint>() != null) {
                jntLimParts[i].max = 1;
                jntLimParts[i].min = -1;
                // BUG FIX: the reset limits were never written back to the
                // joint, so limit changes accumulated across episodes.
                hingeParts[i].limits = jntLimParts[i];
            }
            Rigidbody rb = bodyParts[i].GetComponent<Rigidbody>();
            rb.velocity = Vector3.zero;
            rb.angularVelocity = Vector3.zero;
        }
    }

    // Decodes one discrete action branch: 1 = hold, 2 = +0.2, 3 = -0.2.
    private static float DecodeDirection(float action) {
        switch ((int)action) {
            case 2:
                return .2f;
            case 3:
                return -.2f;
            default:
                return 0f;
        }
    }

    // Shifts one hinge joint's limit window by the decoded action, keeping a
    // fixed span of 1 between min and max, and applies it to the joint.
    private void ApplyJointAction(int jointIndex, float action) {
        jntLimParts[jointIndex].max += DecodeDirection(action);
        jntLimParts[jointIndex].min = jntLimParts[jointIndex].max - 1;
        hingeParts[jointIndex].limits = jntLimParts[jointIndex];
    }

    public override void AgentAction(float[] vectorAction) {

        // Branches 0..7 drive hinge joints 1..8 (buttR, buttL, thighR,
        // thighL, legR, legL, footR, footL) — replaces eight copy/pasted
        // switch blocks with one helper.
        for (int branch = 0; branch < 8; branch++) {
            ApplyJointAction(branch + 1, vectorAction[branch]);
        }

        // BUG FIX: the waist branch previously switched on footLVec
        // (vectorAction[7]) instead of its own branch, vectorAction[8].
        // The rotation itself is still disabled, as in the original.
        float waistDir = DecodeDirection(vectorAction[8]);
       // waist.transform.Rotate(0, waistDir, 0);

        // Small positive reward while the waist stays high enough,
        // small penalty while it sags.
        if (waist.transform.position.y > -1.4f) {
            AddReward(.02f);
        }
        else {
            AddReward(-.03f);
        }

        // End the episode once the waist has fallen too far.
        if (waist.transform.position.y <= -3) {
            Done();
            print("He fell too far...");
        }

        // Cache a downward raycast from the waist for the next observation.
        RaycastHit hit;
        if (Physics.Raycast(waist.transform.position, -waist.transform.up, out hit)) {
            rayDist = hit.distance;
            rayPoint = hit.point;
            // NOTE(review): this measures the angle between the waist's
            // *position* vector and the surface normal — you probably want
            // Vector3.Angle(waist.transform.up, hit.normal). Confirm intent.
            rayAngle = Vector3.Angle(waist.transform.position, hit.normal);
            rayFloorAngle = hit.collider.transform.eulerAngles;
        }
    }

    public override void CollectObservations() {

        // Per-body-part state: pose, velocities and current joint limits.
        for (int i = 0; i < bodyParts.Length; i++) {
            AddVectorObs(bodyParts[i].transform.position);
            AddVectorObs(bodyParts[i].transform.eulerAngles);
            AddVectorObs(bodyParts[i].GetComponent<Rigidbody>().velocity);
            AddVectorObs(bodyParts[i].GetComponent<Rigidbody>().angularVelocity);
            AddVectorObs(jntLimParts[i].max);
            AddVectorObs(jntLimParts[i].min);
        }

        // BUG FIX: the waist raycast values are the same for every body part,
        // so observe them once instead of nine times; and the
        // RayPerceptionSensorComponent3D adds its own observations
        // automatically — it must NOT be passed to AddVectorObs. Remember to
        // update the Behavior Parameters "Space Size" to match the smaller
        // observation vector.
        AddVectorObs(rayDist);
        AddVectorObs(rayPoint);
        AddVectorObs(rayAngle);
        AddVectorObs(rayFloorAngle);
    }
}




区域脚本:

using MLAgents;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using UnityEngine;

public class BalancingArea : Area
{
    // All BalanceAgents found under this area at startup.
    public List<BalanceAgent> BalanceAgent { get; private set; }
    // The single academy instance in the scene.
    public BalanceAcademy BalanceAcademy { get; private set; }
    public GameObject area;

    // Caches the agents in this area and the scene's academy.
    private void Awake() {
        BalanceAgent = transform.GetComponentsInChildren<BalanceAgent>().ToList();              //Grabs all agents in area
        BalanceAcademy = FindObjectOfType<BalanceAcademy>();                //Grabs balance acedemy
    }

    // Intended to recenter an agent on this area; the logic is currently
    // disabled (the agent resets itself in BalanceAgent.AgentReset instead).
    public void ResetAgentPosition(BalanceAgent agent) {
        //agent.transform.position = new Vector3(area.transform.position.x, 0, area.transform.position.z);
       // agent.transform.eulerAngles = new Vector3(0,0,0);
    }

    // The empty Start() and Update() methods were removed: Unity invokes
    // even empty magic methods every frame, which adds per-object overhead
    // for no benefit.
}




学院脚本:

using MLAgents;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class BalanceAcademy : Academy
{
    // Intentionally empty: the base Academy class handles environment
    // stepping/reset; no academy-level parameters are overridden here.
}
4

5 回答 5

1

我建议查看 ML-Agents github repo 中的Crawler 示例。您可能希望使用与 Crawler 相同的观察结果:“每个肢体的位置、旋转、速度和角速度加上身体的加速度和角加速度。”

我还建议从 Crawler 复制 trainer_config.yaml 设置,以便您获得超参数的最佳起点。

作为旁注,RayPerceptionSensorComponent3D 在某种程度上独立工作,您不需要使用 AddVectorObs() 将其添加到观察中,因为它会在幕后自动执行此操作。您也不再需要为 RayPerception 的空间大小添加任何值。也就是说,您可能根本不想使用 RayPerception 来解决这个问题。

于 2019-12-17T15:09:59.357 回答
0

为什么不尝试使用 transform.positions 设置rewards 并在此腰部接触地面时结束情节(例如检查腰部和地面之间的碰撞)(或)检查腰部和其他部分是否在相同的 transform.position 和 setrewards 相应.

你也可以贴出你的统一项目文件的链接吗?我想看看你在那里做了什么。

于 2020-04-05T19:11:20.277 回答
0

对于 207 的观察空间大小,我强烈建议让您的代理训练时间超过 10k 步。500 万或更多可能是合理的。真的,只需尝试更长时间的训练,例如过夜,看看是否会发生任何不同的情况。一次拥有多个机器学习机器人训练副本以更快地训练也有很大帮助。尝试制作环境的多个副本。(我建议将机器人和飞机放在一个空的游戏对象中,然后复制 8-16 次)您不必做任何特别的事情,只需复制并正常运行,其他副本也会训练。

于 2021-03-10T20:16:24.930 回答
0

我认为人们很难快速读懂您的代码。强化学习(RL)有几个主要问题:

  • 你用什么样的模型来存储你的学习?流行的是 q-tables,还有使用 q-learning 的深度神经网络
  • 你的奖励函数是什么,你使用任何惩罚吗?

对我有帮助的是检查当某事成功时进行了哪些更新以及何时应用负奖励。在这种情况下,还要检查此更新对您的模型意味着什么。它是否也奖励了之前取得成功的步骤?

重要的是模型可以逐步达到目标。例如,此示例的良好奖励将是时间流逝和完美平衡指标的组合。

我希望它有助于将您定位在正确的方向。

顺便说一句,与它所建立的构建块(如监督学习方法)相比,强化学习非常具有挑战性。它也有助于检查它们。我真的可以推荐免费的 fastai 课程,并且是我见过的最重要的机器学习领域的最佳课程。

于 2019-12-17T07:43:05.287 回答
0

仅作为一种可能性提出:你是否忘了在 mlagents-learn 命令中加上 --train 参数?这个问题坑过我好几次。

于 2020-06-12T19:26:53.987 回答