I'm using the g2o library to run bundle adjustment on some known data. My function is based on one from OrbSlam2 and takes two camera poses, plus a std::vector of 3D points with their corresponding 2D points, or "observations", in each camera pose.
It works well when I use the known (correct) camera poses: the reprojected points look correct. However, when I add noise to the second camera pose, it still runs and the optimization completes, but the resulting pose looks exactly like the noisy input. The full function is:
void BundleAdjustment::Solve(const std::vector<KeyFrame *> &vpKFs,
                             const std::vector<MapPoint *> &allMapPoints,
                             cv::Mat intrinsic, cv::Mat dist,
                             std::vector<cv::Point2f> &projPnts)
{
    dogleg = false;
    int64 t0 = cv::getTickCount();

    g2o::SparseOptimizer optimizer;
    optimizer.setVerbose(false);

    std::unique_ptr<g2o::BlockSolver_6_3::LinearSolverType> linearSolver;
    if (dense)
    {
        linearSolver = g2o::make_unique<g2o::LinearSolverDense<g2o::BlockSolver_6_3::PoseMatrixType>>();
    }
    else
    {
        linearSolver = g2o::make_unique<g2o::LinearSolverEigen<g2o::BlockSolver_6_3::PoseMatrixType>>();
    }
    if (dogleg)
    {
        g2o::OptimizationAlgorithmDogleg* solver = new g2o::OptimizationAlgorithmDogleg(
            g2o::make_unique<g2o::BlockSolver_6_3>(std::move(linearSolver)));
        optimizer.setAlgorithm(solver);
    }
    else
    {
        g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(
            g2o::make_unique<g2o::BlockSolver_6_3>(std::move(linearSolver)));
        optimizer.setAlgorithm(solver);
    }
    int num_cameras_ = vpKFs.size();
    int num_points_ = allMapPoints.size();
    const float thHuber2D = sqrt(5.99);
    const int nExpectedSize = (vpKFs.size() + allMapPoints.size());
    std::vector<g2o::EdgeSE3ProjectXYZ*> vpEdgesMono;
    vpEdgesMono.reserve(nExpectedSize);
    // set camera pose vertices
    for (size_t i = 0; i < vpKFs.size(); i++)
    {
        KeyFrame *pKF = vpKFs[i];
        g2o::VertexSE3Expmap *vSE3 = new g2o::VertexSE3Expmap();
        g2o::SE3Quat squat;
        Eigen::Quaterniond qq(pKF->quat.w(), pKF->quat.x(), pKF->quat.y(), pKF->quat.z());
        squat.setRotation(qq);
        Eigen::Vector3d pp(pKF->pos.x(), pKF->pos.y(), pKF->pos.z());
        if (pKF->id == 1)
        {
            pp.x() += 1.2; // add noise to the second camera pose
        }
        squat.setTranslation(pp);
        vSE3->setEstimate(squat);
        vSE3->setId(pKF->id);
        if (pKF->id == 0)
        {
            vSE3->setFixed(true); // fix the first camera to anchor the gauge
        }
        optimizer.addVertex(vSE3);
    }
    // set point vertices and projection edges
    for (size_t i = 0; i < allMapPoints.size(); i++)
    {
        MapPoint* pMP = allMapPoints[i];
        g2o::VertexSBAPointXYZ* vPoint = new g2o::VertexSBAPointXYZ();
        g2o::Vector3 pp(pMP->pos.x, pMP->pos.y, pMP->pos.z);
        vPoint->setEstimate(pp);
        vPoint->setId(i + num_cameras_); // point ids start after the camera ids
        vPoint->setMarginalized(true);
        optimizer.addVertex(vPoint);

        // set one edge per observation of this point
        const std::map<KeyFrame*, size_t> observations = pMP->mObs;
        for (std::map<KeyFrame *, size_t>::const_iterator mit = observations.begin(); mit != observations.end(); mit++)
        {
            KeyFrame *pKF = mit->first;
            const cv::Point2f &kpUn = pKF->vObs_pos[mit->second];
            Eigen::Matrix<double, 2, 1> obs;
            obs << kpUn.x, kpUn.y;

            g2o::EdgeSE3ProjectXYZ *e = new g2o::EdgeSE3ProjectXYZ();
            // g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
            // g2o::RobustKernelDCS* rk = new g2o::RobustKernelDCS;
            g2o::RobustKernelCauchy* rk = new g2o::RobustKernelCauchy;
            rk->setDelta(thHuber2D);
            e->setRobustKernel(rk);

            const int camera_id = pKF->id;
            int point_id = i + num_cameras_;
            e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex *>(optimizer.vertex(point_id)));
            e->setVertex(1, dynamic_cast<g2o::OptimizableGraph::Vertex *>(optimizer.vertex(camera_id)));
            e->setMeasurement(obs);
            e->setInformation(Eigen::Matrix2d::Identity());
            e->fx = pKF->fx;
            e->fy = pKF->fy;
            e->cx = pKF->cx;
            e->cy = pKF->cy;

            vpEdgesMono.push_back(e);
            optimizer.addEdge(e);
        }
    }
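    // (Sanity check I could print here to confirm the graph was actually built:
    //   std::cout << optimizer.vertices().size() << " vertices, "
    //             << optimizer.edges().size() << " edges" << std::endl;)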
    // start optimization
    std::cout << "Optimization starts..." << std::endl;
    optimizer.initializeOptimization();
    optimizer.optimize(20);
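    // (Another check I could add right here, using g2o's standard error API,
    // to see whether the error actually decreased:
    //   optimizer.computeActiveErrors();
    //   std::cout << "robust chi2 after: " << optimizer.activeRobustChi2() << std::endl;
    // Setting optimizer.setVerbose(true) above would also print chi2 per iteration.)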
std::cout << "Optimization completed!" << std::endl;
std::cout << "Writing camera result" << std::endl;
    for (int i = 0; i < num_cameras_; i++)
    {
        g2o::VertexSE3Expmap *pCamera = dynamic_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(i));
        g2o::SE3Quat SE3quat = pCamera->estimate();
        g2o::Quaternion gQuat = SE3quat.rotation();
        g2o::Vector3 gTrans = SE3quat.translation();
        if (i == 1)
        {
            Eigen::Matrix<double, 3, 3> er(gQuat); // rotation matrix from the quaternion
            cv::Mat R;
            cv::Mat t;
            cv::Mat rvec;
            eigen2cv(er, R);
            eigen2cv(gTrans, t);
            cv::Rodrigues(R, rvec);

            // reproject the map points with the optimized pose
            std::vector<cv::Point3f> Pnts;
            for (size_t x = 1; x < allMapPoints.size(); x++)
            {
                Pnts.push_back(allMapPoints[x]->pos);
            }
            cv::projectPoints(Pnts, rvec, t, intrinsic, dist, projPnts);

            std::cout << i << " " << gTrans.x() << " " << gTrans.y() << " " << gTrans.z() << " "
                      << gQuat.x() << " " << gQuat.y() << " " << gQuat.z() << " " << gQuat.w() << std::endl;
        }
    }
}
Where am I going wrong? Is my data not being optimized, or am I reading the data back incorrectly?
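For reference, this is the kind of minimal check I have in mind to tell those two failure modes apart (a sketch only, assuming the optimizer is built exactly as in Solve(); the dumpPose helper is hypothetical and not part of my code above): print the noisy camera's estimate before and after optimize(), together with the robust chi2.

#include <iostream>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/types/sba/types_six_dof_expmap.h>

// Hypothetical helper: print the SE3 estimate of one vertex.
static void dumpPose(const g2o::SparseOptimizer &optimizer, int id, const char *tag)
{
    const g2o::VertexSE3Expmap *v =
        dynamic_cast<const g2o::VertexSE3Expmap *>(optimizer.vertex(id));
    const g2o::SE3Quat est = v->estimate();
    std::cout << tag << " t = " << est.translation().transpose()
              << "  q(xyzw) = " << est.rotation().coeffs().transpose() << std::endl;
}

// Usage, with the graph built as in Solve() and vertex 1 the noisy camera:
//     dumpPose(optimizer, 1, "before:");
//     optimizer.initializeOptimization();
//     optimizer.optimize(20);
//     optimizer.computeActiveErrors();
//     std::cout << "robust chi2: " << optimizer.activeRobustChi2() << std::endl;
//     dumpPose(optimizer, 1, "after: ");
// If "before" and "after" match while chi2 drops, I'm reading the data back
// wrong; if chi2 never changes, the graph is not being optimized at all.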