I am working on a research project in which I need to estimate the 6DOF pose of objects in real time for a pick-and-place task. The pose must be estimated in real time, and the objects may be stacked one on top of another and are identical, so I have to obtain the position and orientation of the topmost object. The difficulty is that the objects are identical (PPVC blocks, from the construction domain), but the good news is that their shapes are quite regular.

So how can I compare a known 3D CAD model against the topmost object in a single point-cloud frame from an RGBD camera, so that I obtain its 6DOF pose in real time? Any method is acceptable, but it must be very accurate, since very high precision is required.

I know that deep learning can be used to localize the object, after which it can be compared against the 3D CAD model, or the point cloud can be processed directly (segmentation, clustering if needed, PCA, ...). But what if deep learning fails and the object cannot be detected in the RGBD camera data? What are my options then? Is the surface matching algorithm in OpenCV (https://docs.opencv.org/3.0-beta/modules/surface_matching/doc/surface_matching.html) an option?
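For reference, OpenCV's surface_matching module implements point-pair-feature (PPF) matching, which needs no trained detector and could serve as a fallback when deep learning fails. Below is a minimal sketch of how it might be wired up; the file names model.ply / scene.ply are placeholders (both files need per-point normals), and the detector/match parameters are illustrative values that would need tuning:

#include <opencv2/surface_matching.hpp>
#include <opencv2/surface_matching/ppf_helpers.hpp>
#include <iostream>
#include <vector>

int main()
{
    using namespace cv::ppf_match_3d;

    // Load model and scene as Nx6 float matrices (xyz + normals);
    // "model.ply" and "scene.ply" are placeholder file names
    cv::Mat model = loadPLYSimple("model.ply", 1);
    cv::Mat scene = loadPLYSimple("scene.ply", 1);

    // Train the PPF detector on the CAD model
    // (relative sampling step and distance step are tuning parameters)
    PPF3DDetector detector(0.025, 0.05);
    detector.trainModel(model);

    // Match against the scene; results come back as ranked 6DOF pose hypotheses
    std::vector<Pose3DPtr> results;
    detector.match(scene, results, 1.0 / 40.0, 0.05);

    if (!results.empty())
        results[0]->printPose();  // best pose hypothesis
    return 0;
}

PPF matching tends to be slower than a single ICP refinement, so in practice it is often run on heavily downsampled clouds, or used only to bootstrap an initial pose that ICP then refines.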

I started with PCL, comparing the 3D CAD object model against the PCL scene of the actual object using ICP (Iterative Closest Point).

My sample code is here:

#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <visualization_msgs/MarkerArray.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/point_types.h>
#include <pcl/io/ply_io.h>
#include <pcl/registration/icp.h>
#include <pcl/common/transforms.h>
#include <Eigen/Geometry>

// Publishers used inside the callback (initialized in main)
ros::Publisher pub;
ros::Publisher markers_pub_;

void cloud_cb(const sensor_msgs::PointCloud2::ConstPtr &msg){

   // Convert the ROS message to a PCL point cloud
   pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloudIn (new pcl::PointCloud<pcl::PointXYZRGB>);
   pcl::fromROSMsg(*msg,*cloudIn);

   //Declaration of cloud pointers
   pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloudOut (new pcl::PointCloud<pcl::PointXYZRGB>);
   pcl::PointCloud<pcl::PointXYZRGB> finalCloud;
   pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloudOut_new (new pcl::PointCloud<pcl::PointXYZRGB>) ;


   //Load CAD model file
   if(pcl::io::loadPLYFile ("/home/admini/darknet_ros/src/darknet_ros/gb_visual_detection_3d/darknet_ros_3d/darknet_ros_3d/src/rs_box.ply", *cloudOut)==-1)
   {
     PCL_ERROR ("Couldn't read second input file! \n");
     return; // void callback: bail out instead of returning a value
   }


  //std::cout << "\nLoaded file "  << " (" << cloudOut->size () << " points) in " << time.toc () << " ms\n" << std::endl;
  
  pcl::IterativeClosestPoint<pcl::PointXYZRGB, pcl::PointXYZRGB> icp;
  icp.setInputSource(cloudOut);                 // CAD model (setInputCloud is deprecated)
  icp.setInputTarget(cloudIn);                  // live scene from the camera
  icp.setMaximumIterations (30);
  icp.setTransformationEpsilon (1e-9);
  icp.setMaxCorrespondenceDistance (2);
  icp.setEuclideanFitnessEpsilon (0.6);
  icp.setRANSACOutlierRejectionThreshold (80);
  //icp.setRANSACIterations(800);

  icp.align(finalCloud);

  if (icp.hasConverged())
  {
      std::cout << "ICP converged." << std::endl
                << "The score is " << icp.getFitnessScore() << std::endl;
      std::cout << "Transformation matrix:" << std::endl;
      std::cout << icp.getFinalTransformation() << std::endl;
  }
  else std::cout << "ICP did not converge." << std::endl;

  Eigen::Matrix4f transformationMatrix = icp.getFinalTransformation ();
  std::cout << "trans:\n" << transformationMatrix << std::endl;

  // finalCloud is already expressed in the camera frame after align();
  // transforming it again would apply the transformation twice, so the
  // original CAD cloud is transformed instead.
  pcl::transformPointCloud (*cloudOut, *cloudOut_new, transformationMatrix);

   //sensor_msgs::PointCloud2 cloud_icp;
   //pcl::toROSMsg (*cloudOut , cloud_icp);
   //cloudOut ->header.frame_id = "/zed_left_camera_frame";
   //pub.publish (cloud_icp);


    // Convert the rotation part of the transformation matrix to a quaternion
    Eigen::Matrix3f rot_matrix = transformationMatrix.topLeftCorner<3,3>();
    Eigen::Quaternionf q(rot_matrix);
    q.normalize(); // normalized() only returns a copy; normalize() works in place

    cout << " postion x  = " << transformationMatrix(0,3) <<  endl;
    cout << " postion y  = " << transformationMatrix(1,3) <<  endl;
    cout << " postion z  = " << transformationMatrix(2,3) <<  endl;
    
   //Convert Quaternion to Euler angle
    double roll, pitch, yaw;
    double PI=3.1415926535;
    auto euler = q.toRotationMatrix().eulerAngles(0,1,2);
    std::cout << "Euler from quaternion in roll, pitch, yaw (degree)"<< std::endl << euler*180/PI << std::endl;

  


   //Visualisation marker for RViz
    visualization_msgs::MarkerArray msg_marker;
    visualization_msgs::Marker bbx_marker;
    bbx_marker.header.frame_id = "zed_left_camera_frame";
    bbx_marker.header.stamp = ros::Time::now();
    bbx_marker.ns = "icp_pose";
    bbx_marker.type = visualization_msgs::Marker::CUBE;
    bbx_marker.action = visualization_msgs::Marker::ADD;
    bbx_marker.pose.position.x = transformationMatrix(0,3);
    bbx_marker.pose.position.y = transformationMatrix(1,3);
    bbx_marker.pose.position.z = transformationMatrix(2,3);
    // Use the unit quaternion components directly; scaling them by angle
    // factors (as before) produces an invalid orientation.
    bbx_marker.pose.orientation.x = q.x();
    bbx_marker.pose.orientation.y = q.y();
    bbx_marker.pose.orientation.z = q.z();
    bbx_marker.pose.orientation.w = q.w();
    bbx_marker.scale.x = 0.09;
    bbx_marker.scale.y = 0.22;
    bbx_marker.scale.z = 0.21;
    bbx_marker.color.r = 0.1;
    bbx_marker.color.g = 0.1;
    bbx_marker.color.b = 0.3;
    bbx_marker.color.a = 0.8;
    bbx_marker.lifetime = ros::Duration();
    msg_marker.markers.push_back(bbx_marker);
    markers_pub_.publish(msg_marker);


}

int
main (int argc, char** argv)
{
  // Initialize ROS
  ros::init (argc, argv, "my_pcl_tutorial");
  ros::NodeHandle nh;
  // Create a ROS subscriber for the input point cloud
  ros::Subscriber sub = nh.subscribe ("/zed/zed_node/point_cloud/cloud_registered", 200, cloud_cb);
  // Create a ROS publisher for the output point cloud
  pub = nh.advertise<sensor_msgs::PointCloud2> ("cloud_icp", 100);
  markers_pub_ = nh.advertise<visualization_msgs::MarkerArray> ("msg_marker", 200);
  ros::spin();

}

However, there are two points in the code and the algorithm that need further improvement. First, it could be more accurate. Second, the update rate needs to be higher; the target is around 25-30 fps. Any help on how to achieve these two improvements?

Thanks


1 Answer


Essentially, there are two main steps:

  1. Segmentation: extract a list of object candidates from the scene
  2. Recognition: match each extracted object against the known objects in a database

The PCL library already provides several methods for both steps.

As a starting point, take a look at this tutorial, which covers both steps (a rough sketch of the segmentation step follows after the link):

https://pcl.readthedocs.io/projects/tutorials/en/latest/correspondence_grouping.html#correspondence-grouping
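As a rough illustration of the segmentation step in PCL (simpler than the full correspondence-grouping pipeline in the tutorial): downsample the scene, remove the supporting plane, cluster what remains, and keep the topmost cluster. All thresholds below are placeholder values that would need tuning for PPVC-scale blocks:

#include <limits>
#include <pcl/point_types.h>
#include <pcl/common/centroid.h>
#include <pcl/common/io.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/search/kdtree.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/segmentation/extract_clusters.h>

typedef pcl::PointCloud<pcl::PointXYZ> CloudT;

// Step 1: isolate the topmost object from the scene
CloudT::Ptr extractTopCluster(const CloudT::Ptr &scene)
{
  // Downsample first -- usually the single biggest speed win for ICP later
  CloudT::Ptr filtered(new CloudT);
  pcl::VoxelGrid<pcl::PointXYZ> vg;
  vg.setInputCloud(scene);
  vg.setLeafSize(0.01f, 0.01f, 0.01f);   // 1 cm voxels (illustrative)
  vg.filter(*filtered);

  // Remove the dominant plane (floor / supporting surface)
  pcl::ModelCoefficients::Ptr coeffs(new pcl::ModelCoefficients);
  pcl::PointIndices::Ptr plane(new pcl::PointIndices);
  pcl::SACSegmentation<pcl::PointXYZ> seg;
  seg.setModelType(pcl::SACMODEL_PLANE);
  seg.setMethodType(pcl::SAC_RANSAC);
  seg.setDistanceThreshold(0.01);
  seg.setInputCloud(filtered);
  seg.segment(*plane, *coeffs);

  CloudT::Ptr objects(new CloudT);
  pcl::ExtractIndices<pcl::PointXYZ> extract;
  extract.setInputCloud(filtered);
  extract.setIndices(plane);
  extract.setNegative(true);              // keep everything that is not the plane
  extract.filter(*objects);

  // Cluster the remaining points so the stacked blocks are separated
  pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
  tree->setInputCloud(objects);
  std::vector<pcl::PointIndices> clusters;
  pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
  ec.setClusterTolerance(0.02);
  ec.setMinClusterSize(100);
  ec.setSearchMethod(tree);
  ec.setInputCloud(objects);
  ec.extract(clusters);

  // Keep the cluster with the highest centroid -- the block on top.
  // "Highest" is taken along z here; adjust the axis to the camera frame.
  CloudT::Ptr top(new CloudT);
  double bestZ = -std::numeric_limits<double>::infinity();
  for (const auto &c : clusters)
  {
    Eigen::Vector4f centroid;
    pcl::compute3DCentroid(*objects, c, centroid);
    if (centroid[2] > bestZ)
    {
      bestZ = centroid[2];
      pcl::copyPointCloud(*objects, c, *top);
    }
  }
  return top;
}

The recognition step can then reuse the ICP call from the question with icp.setInputTarget(top): far fewer points makes it faster, and restricting the target to a single cluster keeps ICP from converging onto the wrong block.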

Answered 2020-07-08T14:36:44.390