
How to convert object positions in PyBullet to pixel coordinates and draw a line on a frame using PyBullet and OpenCV?

We want to do this because PyBullet's native addUserDebugLine() function is not available in DIRECT mode.
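In GUI mode we would simply call addUserDebugLine; a minimal sketch of what we are trying to replicate (lineColorRGB takes floats in [0, 1]):

# draw a red line from the origin along the X axis (GUI mode only)
p.addUserDebugLine(lineFromXYZ=[0, 0, 0], lineToXYZ=[1, 0, 0],
                   lineColorRGB=[1, 0, 0], lineWidth=2)

Since that is not an option under p.DIRECT, we render the frame ourselves and draw with OpenCV: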

import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2


VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
def capture_frame(base_pos=[0,0,0], _cam_dist=3, _cam_yaw=45, _cam_pitch=-45):
        _render_width, _render_height = VIDEO_RESOLUTION
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=_cam_dist,
            yaw=_cam_yaw,
            pitch=_cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=90, aspect=float(_render_width) / _render_height,
            nearVal=0.01, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=_render_width, height=_render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER)  # or p.ER_BULLET_HARDWARE_OPENGL
        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array, view_matrix, proj_matrix
def render():
    p1, cubeOrn = p.getBasePositionAndOrientation(1)
    p2, cubeOrn = p.getBasePositionAndOrientation(2)
    frame, view_matrix, proj_matrix = capture_frame()
    frame = cv2.resize(frame, VIDEO_RESOLUTION)
    points = {}

    # reshape matrices
    my_order = 'C'
    pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
    vmat = np.array(view_matrix).reshape((4,4), order=my_order)
    fmat = vmat.T @ pmat.T

    # compute origin from origin point in simulation
    origin = np.array([0,0,0,1])
    frame_origin = (fmat @ origin)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])

    # define unit vectors
    unit_vectors = [ np.array([1,0,0,1]),
                     np.array([0,1,0,1]), 
                     np.array([0,0,1,1]) ]

    for col_id, unit_vector in enumerate(unit_vectors):
        cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])
        cv2.line(frame, (640,360), (int(cur_point[0]),int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
    cv2.imwrite("my_rendering.jpg", frame)
    print(p1,p2)
if __name__ == '__main__':
    physicsClient = p.connect(p.DIRECT)  # headless (no GUI)
    p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
    p.setGravity(0,0,-10)
    planeId = p.loadURDF("plane.urdf")
    startPos = [1,0,0.2]
    startOrientation = p.getQuaternionFromEuler([0,0,0])
    boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
    startPos = [0,2,0.2]
    boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
    # set the center of mass frame (loadURDF sets base link frame) startPos/Orn
    # p.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
    for i in range (2400):
        if i == 2399:
            render()
        p.stepSimulation()


    p.disconnect()

The expected output would be the frame below, but with the origin's coordinate frame drawn correctly, e.g. with the X, Y and Z axes colored red, blue and green respectively.

Since the two R2D2 robots are located at [1,0,0] and [0,1,0] respectively, we can see that the drawn coordinate frame is off (see the image below).
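(A side note, in case the colors look swapped: getCameraImage returns RGB while OpenCV's imwrite assumes BGR, so a conversion such as the sketch below may be needed before drawing and saving.)

# hedged sketch: convert PyBullet's RGB frame to OpenCV's BGR channel order
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)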

We tried the following (a note on why the reshape order is ambiguous follows this list):

  • transposing the matrices
  • not transposing the matrices
  • changing the order in which we compute fmat, e.g. pmat @ vmat instead of vmat @ pmat, etc.
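The ambiguity comes from the fact that PyBullet returns these matrices as flat 16-tuples; as far as we can tell (an assumption on our side), they use OpenGL's column-major layout, so the two reshape orders differ by a transpose. A minimal sketch:

# column-major reshape: equivalent to reshape(order='C') followed by .T
vmat = np.array(view_matrix).reshape((4, 4), order='F')
pmat = np.array(proj_matrix).reshape((4, 4), order='F')
clip = pmat @ vmat @ np.array([0, 0, 0, 1])  # world origin -> clip space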

Any help is appreciated.


1 Answer


After some fiddling, I arrived at a solution. Playing around a bit, I found that the result looked almost right, except for the axes' rotation given by the yaw angle. So I make a second call to computeViewMatrixFromYawPitchRoll, but with the opposite yaw, and use that matrix to transform the axes. Unfortunately, I am not sure why this works... but it does!

Note: base_pos, _cam_dist, _cam_yaw and _cam_pitch are now set inside render(). Also note: the up direction is inverted as well (don't ask me why... :-)). A rather messy explanation, I have to admit...

import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
import os

VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
K=np.array([[1280,0,0],[0,720,0],[0,0,1]])  # note: defined but not used below

def capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch):
        _render_width, _render_height = VIDEO_RESOLUTION
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=_cam_dist,
            yaw=_cam_yaw,
            pitch=_cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=90, aspect=float(_render_width) / _render_height,
            nearVal=0.01, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=_render_width, height=_render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER)  # or p.ER_BULLET_HARDWARE_OPENGL
        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array, view_matrix, proj_matrix
def render():
    p1, cubeOrn = p.getBasePositionAndOrientation(1)
    p2, cubeOrn = p.getBasePositionAndOrientation(2)
    base_pos=[0,0,0]
    _cam_dist=3
    _cam_yaw=45
    _cam_pitch=-30
    frame, view_matrix,  proj_matrix = capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch)
    frame = cv2.resize(frame, VIDEO_RESOLUTION)
    points = {}

    # second view matrix with the yaw negated; used only to transform the axes
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=_cam_dist,
        yaw=-_cam_yaw,
        pitch=_cam_pitch,
        roll=0,
        upAxisIndex=2)
    

    my_order = 'C'
    pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
    vmat = np.array(view_matrix).reshape((4,4), order=my_order)

    fmat = pmat @ vmat.T

    # project the world origin (the drawing below uses the image center (640, 360) directly)
    origin = np.array([0,0,0,1])
    frame_origin = (fmat @ origin)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])

    # unit vectors for the axes; note Z is negated (the inverted up direction mentioned above)
    unit_vectors = [ np.array([1,0,0,1]),
                     np.array([0,1,0,1]),
                     np.array([0,0,-1,1]) ]

    for col_id, unit_vector in enumerate(unit_vectors):
        cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])
        cv2.line(frame, (640,360), (int(cur_point[0]),int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
    cv2.imwrite("my_rendering.jpg", frame)
    print(p1,p2)
if __name__ == '__main__':

    physicsClient = p.connect(p.DIRECT)  # headless (no GUI)
    #physicsClient = p.connect(p.GUI)    # graphical alternative
    p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
    p.setGravity(0,0,-10)
    planeId = p.loadURDF("plane.urdf")
    #arrows = p.loadURDF("arrows.urdf")

    startPos = [1,0,0.2]
    startOrientation = p.getQuaternionFromEuler([0,0,0])
    boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
    startPos = [0,2,0.2]
    boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
    # set the center of mass frame (loadURDF sets base link frame) startPos/Orn
    # p.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
    for i in range (2400):
        if i == 2399:
            render()
        p.stepSimulation()

    p.disconnect()

Here is the result (see the rendered image below).
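For comparison, here is a minimal sketch of the textbook world-to-pixel projection (not the method used above; it assumes column-major reshapes, a perspective divide, and image y growing downward; project_to_pixel is an illustrative name):

def project_to_pixel(world_point, view_matrix, proj_matrix, width, height):
    # PyBullet's matrices are flat 16-tuples in column-major (OpenGL) order
    V = np.array(view_matrix).reshape((4, 4), order='F')
    P = np.array(proj_matrix).reshape((4, 4), order='F')
    # world -> clip space
    clip = P @ V @ np.array([world_point[0], world_point[1], world_point[2], 1.0])
    # perspective divide: clip -> normalized device coordinates in [-1, 1]
    ndc = clip[:3] / clip[3]
    # viewport transform: NDC -> pixel coordinates (image y grows downward)
    x = int((ndc[0] + 1.0) * 0.5 * width)
    y = int((1.0 - ndc[1]) * 0.5 * height)
    return x, y

Projecting the origin and each axis endpoint with such a helper and drawing them with cv2.line should, in principle, make the opposite-yaw workaround unnecessary.

Best regards.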

answered 2021-11-15T22:31:28.170