orb_slam3_details/src/LocalMapping.cc

/**
* This file is part of ORB-SLAM3
*
* Copyright (C) 2017-2020 Carlos Campos, Richard Elvira, Juan J. Gómez Rodríguez, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
* Copyright (C) 2014-2016 Raúl Mur-Artal, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
*
* ORB-SLAM3 is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM3 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with ORB-SLAM3.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include "LocalMapping.h"
#include "LoopClosing.h"
#include "ORBmatcher.h"
#include "Optimizer.h"
#include "Converter.h"
#include<mutex>
#include<chrono>
namespace ORB_SLAM3
{
LocalMapping::LocalMapping(System* pSys, Atlas *pAtlas, const float bMonocular, bool bInertial, const string &_strSeqName):
mpSystem(pSys), mbMonocular(bMonocular), mbInertial(bInertial), mbResetRequested(false), mbResetRequestedActiveMap(false), mbFinishRequested(false), mbFinished(true), mpAtlas(pAtlas), bInitializing(false),
mbAbortBA(false), mbStopped(false), mbStopRequested(false), mbNotStop(false), mbAcceptKeyFrames(true),
mbNewInit(false), mIdxInit(0), mScale(1.0), mInitSect(0), mbNotBA1(true), mbNotBA2(true), mIdxIteration(0), infoInertial(Eigen::MatrixXd::Zero(9,9))
{
/*
* mbStopRequested: set to true by an external thread to request that Local Mapping stop
* mbStopped: true means the LocalMapping thread has actually been stopped (paused)
* mbNotStop: true means LocalMapping must not stop right now because a keyframe is about to be inserted; used together with mbStopped
* mbAcceptKeyFrames: true means keyframes may be accepted; schedules keyframe hand-over between Tracking and Local Mapping
* mbAbortBA: flag used to abort the BA optimization
* mbFinishRequested: flag requesting termination of this thread. It is only a request; whether the thread has terminated is indicated by mbFinished
* mbResetRequested: flag requesting a reset of this thread. true means a reset has been requested but not yet completed; it is set back to false once the reset is done
* mbFinished: indicates whether LocalMapping::Run() has finished
*/
mnMatchesInliers = 0;
mbBadImu = false;
mTinit = 0.f;
mNumLM = 0;
mNumKFCulling=0;
//DEBUG: times and data from LocalMapping in each frame
strSequence = "";//_strSeqName;
//f_lm.open("localMapping_times" + strSequence + ".txt");
/*f_lm.open("localMapping_times.txt");
f_lm << "# Timestamp KF, Num CovKFs, Num KFs, Num RecentMPs, Num MPs, processKF, MPCulling, CreateMP, SearchNeigh, BA, KFCulling, [numFixKF_LBA]" << endl;
f_lm << fixed;*/
}
// Set the handle (pointer) to the loop-closing thread
void LocalMapping::SetLoopCloser(LoopClosing* pLoopCloser)
{
mpLoopCloser = pLoopCloser;
}
// Set the handle (pointer) to the tracking thread
void LocalMapping::SetTracker(Tracking *pTracker)
{
mpTracker=pTracker;
}
// Main function of the thread
void LocalMapping::Run()
{
// Mark that Run() is currently executing and has not finished yet
mbFinished = false;
// Main loop
while(1)
{
// Tracking will see that Local Mapping is busy
// Step 1: tell Tracking that Local Mapping is busy, so it should not send new keyframes for now
// All keyframes processed by the LocalMapping thread come from the Tracking thread
SetAcceptKeyFrames(false);
// Check if there are keyframes in the queue
if(CheckNewKeyFrames() && !mbBadImu)
{
// std::cout << "LM" << std::endl;
std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now();
// BoW conversion and insertion in Map
// Step 2: process the keyframes in the queue: compute BoW, update observations, descriptors and the covisibility graph, and insert the keyframe into the map
ProcessNewKeyFrame();
std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
// Check recent MapPoints
// Step 3: cull recently added map points of poor quality based on their observations
MapPointCulling();
std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
// Triangulate new MapPoints
// Step 4: triangulate new map points between the current keyframe and its neighbours to make tracking more robust
CreateNewMapPoints();
std::chrono::steady_clock::time_point t3 = std::chrono::steady_clock::now();
// Save here:
// # Cov KFs
// # tot Kfs
// # recent added MPs
// # tot MPs
// # localMPs in LBA
// # fixedKFs in LBA
mbAbortBA = false;
// The last keyframe in the queue has been processed
if(!CheckNewKeyFrames())
{
// Find more matches in neighbor keyframes and fuse point duplications
// Step 5: find and fuse duplicated map points between the current keyframe and its (first- and second-level) neighbouring keyframes
SearchInNeighbors();
}
std::chrono::steady_clock::time_point t4 = std::chrono::steady_clock::now();
std::chrono::steady_clock::time_point t5 = t4, t6 = t4;
// mbAbortBA = false;
//DEBUG--
/*f_lm << setprecision(0);
f_lm << mpCurrentKeyFrame->mTimeStamp*1e9 << ",";
f_lm << mpCurrentKeyFrame->GetVectorCovisibleKeyFrames().size() << ",";
f_lm << mpCurrentKeyFrame->GetMap()->GetAllKeyFrames().size() << ",";
f_lm << mlpRecentAddedMapPoints.size() << ",";
f_lm << mpCurrentKeyFrame->GetMap()->GetAllMapPoints().size() << ",";*/
//--
int num_FixedKF_BA = 0;
// The queue is empty and loop closing has not requested LocalMapping to stop
if(!CheckNewKeyFrames() && !stopRequested())
{
if(mpAtlas->KeyFramesInMap()>2)
{
if(mbInertial && mpCurrentKeyFrame->GetMap()->isImuInitialized())
{
float dist = cv::norm(mpCurrentKeyFrame->mPrevKF->GetCameraCenter() - mpCurrentKeyFrame->GetCameraCenter()) +
cv::norm(mpCurrentKeyFrame->mPrevKF->mPrevKF->GetCameraCenter() - mpCurrentKeyFrame->mPrevKF->GetCameraCenter());
if(dist>0.05)
mTinit += mpCurrentKeyFrame->mTimeStamp - mpCurrentKeyFrame->mPrevKF->mTimeStamp;
if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA2())
{
if((mTinit<10.f) && (dist<0.02))
{
cout << "Not enough motion for initializing. Reseting..." << endl;
unique_lock<mutex> lock(mMutexReset);
mbResetRequestedActiveMap = true;
mpMapToReset = mpCurrentKeyFrame->GetMap();
mbBadImu = true;
}
}
bool bLarge = ((mpTracker->GetMatchesInliers()>75)&&mbMonocular)||((mpTracker->GetMatchesInliers()>100)&&!mbMonocular);
Optimizer::LocalInertialBA(mpCurrentKeyFrame, &mbAbortBA, mpCurrentKeyFrame->GetMap(), bLarge, !mpCurrentKeyFrame->GetMap()->GetIniertialBA2());
}
else
{
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
// Note that the second argument is passed by address, so when mbAbortBA changes the BA can be interrupted promptly
Optimizer::LocalBundleAdjustment(mpCurrentKeyFrame,&mbAbortBA, mpCurrentKeyFrame->GetMap(),num_FixedKF_BA);
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
}
}
t5 = std::chrono::steady_clock::now();
// Initialize IMU here
if(!mpCurrentKeyFrame->GetMap()->isImuInitialized() && mbInertial)
{
if (mbMonocular)
InitializeIMU(1e2, 1e10, true);
else
InitializeIMU(1e2, 1e5, true);
}
// Check redundant local Keyframes
KeyFrameCulling();
t6 = std::chrono::steady_clock::now();
if ((mTinit<100.0f) && mbInertial)
{
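// Inertial initialization schedule (descriptive summary of the blocks below): while mTinit < 100 s,
// and once the IMU is initialized and tracking is OK, VIBA 1 is run when mTinit > 5 s, VIBA 2 when
// mTinit > 15 s, and, for maps with at most 100 keyframes, a monocular scale refinement is triggered
// in half-second windows after 25, 35, 45, 55, 65 and 75 s.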
if(mpCurrentKeyFrame->GetMap()->isImuInitialized() && mpTracker->mState==Tracking::OK) // Enter here every time local-mapping is called
{
if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA1()){
if (mTinit>5.0f)
{
cout << "start VIBA 1" << endl;
mpCurrentKeyFrame->GetMap()->SetIniertialBA1();
if (mbMonocular)
InitializeIMU(1.f, 1e5, true); // 1.f, 1e5
else
InitializeIMU(1.f, 1e5, true); // 1.f, 1e5
cout << "end VIBA 1" << endl;
}
}
//else if (mbNotBA2){
else if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA2()){
if (mTinit>15.0f){ // 15.0f
cout << "start VIBA 2" << endl;
mpCurrentKeyFrame->GetMap()->SetIniertialBA2();
if (mbMonocular)
InitializeIMU(0.f, 0.f, true); // 0.f, 0.f
else
InitializeIMU(0.f, 0.f, true);
cout << "end VIBA 2" << endl;
}
}
// scale refinement
if (((mpAtlas->KeyFramesInMap())<=100) &&
((mTinit>25.0f && mTinit<25.5f)||
(mTinit>35.0f && mTinit<35.5f)||
(mTinit>45.0f && mTinit<45.5f)||
(mTinit>55.0f && mTinit<55.5f)||
(mTinit>65.0f && mTinit<65.5f)||
(mTinit>75.0f && mTinit<75.5f))){
cout << "start scale ref" << endl;
if (mbMonocular)
ScaleRefinement();
cout << "end scale ref" << endl;
}
}
}
}
std::chrono::steady_clock::time_point t7 = std::chrono::steady_clock::now();
// Step 8: insert the current keyframe into the loop-closing queue
mpLoopCloser->InsertKeyFrame(mpCurrentKeyFrame);
std::chrono::steady_clock::time_point t8 = std::chrono::steady_clock::now();
double t_procKF = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t1 - t0).count();
double t_MPcull = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t2 - t1).count();
double t_CheckMP = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t3 - t2).count();
double t_searchNeigh = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t4 - t3).count();
double t_Opt = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t5 - t4).count();
double t_KF_cull = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t6 - t5).count();
double t_Insert = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t8 - t7).count();
//DEBUG--
/*f_lm << setprecision(6);
f_lm << t_procKF << ",";
f_lm << t_MPcull << ",";
f_lm << t_CheckMP << ",";
f_lm << t_searchNeigh << ",";
f_lm << t_Opt << ",";
f_lm << t_KF_cull << ",";
f_lm << setprecision(0) << num_FixedKF_BA << "\n";*/
//--
}
else if(Stop() && !mbBadImu)
{
// Safe area to stop
while(isStopped() && !CheckFinish())
{
// cout << "LM: usleep if is stopped" << endl;
// If the stop has not fully completed yet, keep waiting
usleep(3000);
}
// Once finish is confirmed, break out of the thread's main loop
if(CheckFinish())
break;
}
// Check whether a reset of this thread has been requested
ResetIfRequested();
// Tracking will see that Local Mapping is free to accept keyframes again
SetAcceptKeyFrames(true);
// If a finish has been requested, break out of the main loop
if(CheckFinish())
break;
// cout << "LM: normal usleep" << endl;
usleep(3000);
}
// Mark the thread as finished
SetFinish();
}
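// Usage sketch (not part of this file; the wiring below follows the usual ORB_SLAM3::System setup and exact names may differ):
//   LocalMapping* pLocalMapper = new LocalMapping(pSystem, pAtlas, bMonocular, bInertial, seqName);
//   std::thread* ptLocalMapping = new std::thread(&ORB_SLAM3::LocalMapping::Run, pLocalMapper);
//   ...
//   pLocalMapper->RequestFinish(); // on shutdown; Run() exits its main loop once CheckFinish() returns true
// Tracking feeds this thread through InsertKeyFrame(), defined below.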
// Insert a keyframe; called by the external Tracking thread. The keyframe is only pushed into the queue here and processed later by the thread's main function
void LocalMapping::InsertKeyFrame(KeyFrame *pKF)
{
unique_lock<mutex> lock(mMutexNewKFs);
// Push the keyframe into the queue
mlNewKeyFrames.push_back(pKF);
mbAbortBA=true;
}
// Check whether there are keyframes waiting in the queue
bool LocalMapping::CheckNewKeyFrames()
{
unique_lock<mutex> lock(mMutexNewKFs);
return(!mlNewKeyFrames.empty());
}
/**
* @brief Process the keyframes in the queue: compute BoW, update observations, descriptors and the covisibility graph, and insert the keyframe into the map
*
*/
void LocalMapping::ProcessNewKeyFrame()
{
// Step 1: take one keyframe out of the buffer queue
// This queue is made up of keyframes inserted into LocalMapping by the Tracking thread
{
unique_lock<mutex> lock(mMutexNewKFs);
// Take the keyframe at the front of the queue as the current keyframe to process
mpCurrentKeyFrame = mlNewKeyFrames.front();
// After taking it, remove it from the queue
mlNewKeyFrames.pop_front();
}
// Compute Bags of Words structures
// Step 2: compute the BoW representation of this keyframe's features
mpCurrentKeyFrame->ComputeBoW();
// Associate MapPoints to the new keyframe and update normal and descriptor
// Step 3: for valid map points in the current keyframe, update the normal, descriptor and related attributes
// Map points newly matched to the current frame in TrackLocalMap are now associated with this keyframe
const vector<MapPoint*> vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
// Iterate over all map points of the current keyframe
for(size_t i=0; i<vpMapPointMatches.size(); i++)
{
MapPoint* pMP = vpMapPointMatches[i];
if(pMP)
{
if(!pMP->isBad())
{
if(!pMP->IsInKeyFrame(mpCurrentKeyFrame))
{
// If the map point does not yet contain an observation from the current keyframe, add the observation
pMP->AddObservation(mpCurrentKeyFrame, i);
// Update the point's mean viewing direction and observation distance range
pMP->UpdateNormalAndDepth();
// Update the point's best (most distinctive) descriptor
pMP->ComputeDistinctiveDescriptors();
}
else // this can only happen for new stereo points inserted by the Tracking
{
// The map point already records an observation from the current keyframe
// These are the new map points created by Tracking (stereo/RGB-D) before the keyframe reached Local Mapping
// Put them into mlpRecentAddedMapPoints so that MapPointCulling can check them later
mlpRecentAddedMapPoints.push_back(pMP);
}
}
}
}
// Update links in the Covisibility Graph
// Step 4: update the connections between keyframes (the covisibility graph)
mpCurrentKeyFrame->UpdateConnections();
// Insert Keyframe in Map
// Step 5: insert the keyframe into the map
mpAtlas->AddKeyFrame(mpCurrentKeyFrame);
}
void LocalMapping::EmptyQueue()
{
while(CheckNewKeyFrames())
ProcessNewKeyFrame();
}
/**
* @brief Check recently added map points and remove the low-quality ones based on their observations
* mlpRecentAddedMapPoints stores the newly created map points; the unreliable ones are removed here
*/
void LocalMapping::MapPointCulling()
{
// Check Recent Added MapPoints
list<MapPoint*>::iterator lit = mlpRecentAddedMapPoints.begin();
const unsigned long int nCurrentKFid = mpCurrentKeyFrame->mnId;
// Step 1: set the observation threshold according to the sensor type
int nThObs;
if(mbMonocular)
nThObs = 2;
else
nThObs = 3; // MODIFICATION_STEREO_IMU here 3
const int cnThObs = nThObs;
int borrar = mlpRecentAddedMapPoints.size();
// Step 2: iterate over the recently added MapPoints and check them
while(lit!=mlpRecentAddedMapPoints.end())
{
MapPoint* pMP = *lit;
if(pMP->isBad())
// Step 2.1: MapPoints already flagged as bad are simply removed from the check list
lit = mlpRecentAddedMapPoints.erase(lit);
else if(pMP->GetFoundRatio()<0.25f)
{
// Step 2.2: remove the point if the ratio between the frames that actually tracked it and the frames that should have observed it is below 25%
// i.e. mnFound/mnVisible < 25%
// mnFound: number of frames (including ordinary frames) in which the point was actually found; the more the better
// mnVisible: number of frames in which the point was predicted to be visible
// mnFound/mnVisible tends to be high for wide-FOV lenses and low for narrow-FOV lenses
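// Example with illustrative numbers: a point predicted visible in 20 frames but actually matched in only 4 has mnFound/mnVisible = 0.2 < 0.25 and is removed here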
pMP->SetBadFlag();
lit = mlpRecentAddedMapPoints.erase(lit);
}
else if(((int)nCurrentKFid-(int)pMP->mnFirstKFid)>=2 && pMP->Observations()<=cnThObs)
{
// Step 2.3: at least 2 keyframes have passed since the point was created,
// but it is observed by no more than cnThObs keyframes, so remove it
pMP->SetBadFlag();
lit = mlpRecentAddedMapPoints.erase(lit);
}
else if(((int)nCurrentKFid-(int)pMP->mnFirstKFid)>=3)
// Step 2.4: 3 or more keyframes have passed since the point was created and it has not been culled, so it is considered a good point
// Therefore SetBadFlag() is not called; the point is only removed from the check queue and no longer monitored
lit = mlpRecentAddedMapPoints.erase(lit);
else
{
lit++;
borrar--;
}
}
//cout << "erase MP: " << borrar << endl;
}
/**
* @brief Triangulate new map points between the current keyframe and its neighbouring keyframes to make tracking more robust
*
*/
void LocalMapping::CreateNewMapPoints()
{
// Retrieve neighbor keyframes in covisibility graph
// nn is the number of best-covisible keyframes to retrieve
// The requirement differs per sensor: the monocular case needs more well-covisible keyframes to build the map
int nn = 10;
// For stereo inertial case
if(mbMonocular)
nn=20;
// Step 1: retrieve the nn neighbouring keyframes with the highest covisibility with the current keyframe
vector<KeyFrame*> vpNeighKFs = mpCurrentKeyFrame->GetBestCovisibilityKeyFrames(nn);
if (mbInertial)
{
KeyFrame* pKF = mpCurrentKeyFrame;
int count=0;
while((vpNeighKFs.size()<=nn)&&(pKF->mPrevKF)&&(count++<nn))
{
vector<KeyFrame*>::iterator it = std::find(vpNeighKFs.begin(), vpNeighKFs.end(), pKF->mPrevKF);
if(it==vpNeighKFs.end())
vpNeighKFs.push_back(pKF->mPrevKF);
pKF = pKF->mPrevKF;
}
}
float th = 0.6f;
// Matcher configuration: best distance < 0.6 * second-best distance, which is quite strict; orientation is not checked
ORBmatcher matcher(th,false);
// Get the transformation of the current keyframe from the world frame to the camera frame
cv::Mat Rcw1 = mpCurrentKeyFrame->GetRotation();
cv::Mat Rwc1 = Rcw1.t();
cv::Mat tcw1 = mpCurrentKeyFrame->GetTranslation();
cv::Mat Tcw1(3,4,CV_32F);
Rcw1.copyTo(Tcw1.colRange(0,3));
tcw1.copyTo(Tcw1.col(3));
// Get the optical centre of the current keyframe (left camera) in the world frame, and its intrinsics
cv::Mat Ow1 = mpCurrentKeyFrame->GetCameraCenter();
const float &fx1 = mpCurrentKeyFrame->fx;
const float &fy1 = mpCurrentKeyFrame->fy;
const float &cx1 = mpCurrentKeyFrame->cx;
const float &cy1 = mpCurrentKeyFrame->cy;
const float &invfx1 = mpCurrentKeyFrame->invfx;
const float &invfy1 = mpCurrentKeyFrame->invfy;
// Used later to validate the point depth; 1.5 is an empirical value
const float ratioFactor = 1.5f*mpCurrentKeyFrame->mfScaleFactor;
// Search matches with epipolar restriction and triangulate
// Step 2: iterate over the neighbouring keyframes vpNeighKFs
for(size_t i=0; i<vpNeighKFs.size(); i++)
{
// The following procedure is expensive, so if new keyframes are waiting, go and process them first
if(i>0 && CheckNewKeyFrames())// && (mnMatchesInliers>50))
return;
KeyFrame* pKF2 = vpNeighKFs[i];
GeometricCamera* pCamera1 = mpCurrentKeyFrame->mpCamera, *pCamera2 = pKF2->mpCamera;
// Check first that baseline is not too short
// Optical centre of the neighbouring keyframe in the world frame
cv::Mat Ow2 = pKF2->GetCameraCenter();
// Baseline vector: camera displacement between the two keyframes
cv::Mat vBaseline = Ow2-Ow1;
// Baseline length
const float baseline = cv::norm(vBaseline);
// Step 3: check whether the camera motion baseline is long enough
if(!mbMonocular)
{
// For stereo cameras, do not create 3D points if the distance between the keyframes is smaller than the stereo baseline itself,
// because map points recovered from such a short baseline are unstable
if(baseline<pKF2->mb)
continue;
}
else
{
// Monocular case
// Median scene depth of the neighbouring keyframe
const float medianDepthKF2 = pKF2->ComputeSceneMedianDepth(2);
// Ratio of baseline to scene depth
const float ratioBaselineDepth = baseline/medianDepthKF2;
// If this ratio is very small, the baseline is too short and the recovered 3D points would be inaccurate, so skip this neighbouring keyframe
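// Illustrative numbers: with a median scene depth of 5 m, the 0.01 threshold below requires a baseline of at least 5 cm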
if(ratioBaselineDepth<0.01)
continue;
}
// Compute Fundamental Matrix
// Step 4: compute the fundamental matrix between the two keyframes from their poses
cv::Mat F12 = ComputeF12(mpCurrentKeyFrame,pKF2);
// Search matches that fullfil epipolar constraint
// Step 5: quickly match the still-unmatched features of the two keyframes via BoW, use the epipolar constraint to reject outliers, and generate new matched pairs
vector<pair<size_t,size_t> > vMatchedIndices;
bool bCoarse = mbInertial &&
((!mpCurrentKeyFrame->GetMap()->GetIniertialBA2() && mpCurrentKeyFrame->GetMap()->GetIniertialBA1())||
mpTracker->mState==Tracking::RECENTLY_LOST);
matcher.SearchForTriangulation(mpCurrentKeyFrame,pKF2,F12,vMatchedIndices,false,bCoarse);
cv::Mat Rcw2 = pKF2->GetRotation();
cv::Mat Rwc2 = Rcw2.t();
cv::Mat tcw2 = pKF2->GetTranslation();
cv::Mat Tcw2(3,4,CV_32F);
Rcw2.copyTo(Tcw2.colRange(0,3));
tcw2.copyTo(Tcw2.col(3));
const float &fx2 = pKF2->fx;
const float &fy2 = pKF2->fy;
const float &cx2 = pKF2->cx;
const float &cy2 = pKF2->cy;
const float &invfx2 = pKF2->invfx;
const float &invfy2 = pKF2->invfy;
// Triangulate each match
// Step 6: triangulate a 3D point from each matched pair, much like the Triangulate function
const int nmatches = vMatchedIndices.size();
for(int ikp=0; ikp<nmatches; ikp++)
{
// Step 6.1: take the matched feature points
// Index of the current match in the current keyframe
const int &idx1 = vMatchedIndices[ikp].first;
// Index of the current match in the neighbouring keyframe
const int &idx2 = vMatchedIndices[ikp].second;
// Keypoint of the current match in the current keyframe
const cv::KeyPoint &kp1 = (mpCurrentKeyFrame -> NLeft == -1) ? mpCurrentKeyFrame->mvKeysUn[idx1]
: (idx1 < mpCurrentKeyFrame -> NLeft) ? mpCurrentKeyFrame -> mvKeys[idx1]
: mpCurrentKeyFrame -> mvKeysRight[idx1 - mpCurrentKeyFrame -> NLeft];
const float kp1_ur=mpCurrentKeyFrame->mvuRight[idx1];
bool bStereo1 = (!mpCurrentKeyFrame->mpCamera2 && kp1_ur>=0);
const bool bRight1 = (mpCurrentKeyFrame -> NLeft == -1 || idx1 < mpCurrentKeyFrame -> NLeft) ? false
: true;
// Keypoint of the current match in the neighbouring keyframe
const cv::KeyPoint &kp2 = (pKF2 -> NLeft == -1) ? pKF2->mvKeysUn[idx2]
: (idx2 < pKF2 -> NLeft) ? pKF2 -> mvKeys[idx2]
: pKF2 -> mvKeysRight[idx2 - pKF2 -> NLeft];
// mvuRight stores the u coordinate of the stereo match in the (virtual) right image; it is -1 if there is no stereo observation
const float kp2_ur = pKF2->mvuRight[idx2];
bool bStereo2 = (!pKF2->mpCamera2 && kp2_ur>=0);
const bool bRight2 = (pKF2 -> NLeft == -1 || idx2 < pKF2 -> NLeft) ? false
: true;
if(mpCurrentKeyFrame->mpCamera2 && pKF2->mpCamera2){
if(bRight1 && bRight2){
Rcw1 = mpCurrentKeyFrame->GetRightRotation();
Rwc1 = Rcw1.t();
tcw1 = mpCurrentKeyFrame->GetRightTranslation();
Tcw1 = mpCurrentKeyFrame->GetRightPose();
Ow1 = mpCurrentKeyFrame->GetRightCameraCenter();
Rcw2 = pKF2->GetRightRotation();
Rwc2 = Rcw2.t();
tcw2 = pKF2->GetRightTranslation();
Tcw2 = pKF2->GetRightPose();
Ow2 = pKF2->GetRightCameraCenter();
pCamera1 = mpCurrentKeyFrame->mpCamera2;
pCamera2 = pKF2->mpCamera2;
}
else if(bRight1 && !bRight2){
Rcw1 = mpCurrentKeyFrame->GetRightRotation();
Rwc1 = Rcw1.t();
tcw1 = mpCurrentKeyFrame->GetRightTranslation();
Tcw1 = mpCurrentKeyFrame->GetRightPose();
Ow1 = mpCurrentKeyFrame->GetRightCameraCenter();
Rcw2 = pKF2->GetRotation();
Rwc2 = Rcw2.t();
tcw2 = pKF2->GetTranslation();
Tcw2 = pKF2->GetPose();
Ow2 = pKF2->GetCameraCenter();
pCamera1 = mpCurrentKeyFrame->mpCamera2;
pCamera2 = pKF2->mpCamera;
}
else if(!bRight1 && bRight2){
Rcw1 = mpCurrentKeyFrame->GetRotation();
Rwc1 = Rcw1.t();
tcw1 = mpCurrentKeyFrame->GetTranslation();
Tcw1 = mpCurrentKeyFrame->GetPose();
Ow1 = mpCurrentKeyFrame->GetCameraCenter();
Rcw2 = pKF2->GetRightRotation();
Rwc2 = Rcw2.t();
tcw2 = pKF2->GetRightTranslation();
Tcw2 = pKF2->GetRightPose();
Ow2 = pKF2->GetRightCameraCenter();
pCamera1 = mpCurrentKeyFrame->mpCamera;
pCamera2 = pKF2->mpCamera2;
}
else{
Rcw1 = mpCurrentKeyFrame->GetRotation();
Rwc1 = Rcw1.t();
tcw1 = mpCurrentKeyFrame->GetTranslation();
Tcw1 = mpCurrentKeyFrame->GetPose();
Ow1 = mpCurrentKeyFrame->GetCameraCenter();
Rcw2 = pKF2->GetRotation();
Rwc2 = Rcw2.t();
tcw2 = pKF2->GetTranslation();
Tcw2 = pKF2->GetPose();
Ow2 = pKF2->GetCameraCenter();
pCamera1 = mpCurrentKeyFrame->mpCamera;
pCamera2 = pKF2->mpCamera;
}
}
// Check parallax between rays
// Step 6.2: back-project the matched points to obtain the parallax angle
// Back-projecting a keypoint gives an unnormalized direction vector in the camera frame, lying along the back-projection ray of the point
cv::Mat xn1 = pCamera1->unprojectMat(kp1.pt);
cv::Mat xn2 = pCamera2->unprojectMat(kp2.pt);
// Rotate from the camera frame to the world frame (the result is a vector along the back-projection ray expressed in the world frame, still only a direction), then compute the cosine of the parallax angle
cv::Mat ray1 = Rwc1*xn1;
cv::Mat ray2 = Rwc2*xn2;
// Standard formula for the angle between two vectors
const float cosParallaxRays = ray1.dot(ray2)/(cv::norm(ray1)*cv::norm(ray2));
// +1 initializes cosParallaxStereo to a value larger than any valid cosine, so it only matters if it is set below
float cosParallaxStereo = cosParallaxRays+1;
float cosParallaxStereo1 = cosParallaxStereo;
float cosParallaxStereo2 = cosParallaxStereo;
// Step 6.3: for stereo, compute the parallax angle from the stereo observation; nothing special is done in the monocular case
if(bStereo1)
// The sensor is stereo and this point in the current keyframe has a valid depth
// Assuming a rectified (parallel) stereo rig, compute the parallax angle between the two cameras observing this point
// Q: wouldn't the vector-angle computation be more accurate in the stereo case? Why not use that directly?
// A: because the stereo depth and baseline are more reliable than triangulating from feature matches
cosParallaxStereo1 = cos(2*atan2(mpCurrentKeyFrame->mb/2,mpCurrentKeyFrame->mvDepth[idx1]));
else if(bStereo2)
// The sensor is stereo and this point in the neighbouring keyframe has a valid depth; same operation as above
cosParallaxStereo2 = cos(2*atan2(pKF2->mb/2,pKF2->mvDepth[idx2]));
// Stereo parallax: take the smaller cosine, i.e. the larger of the two stereo parallax angles
cosParallaxStereo = min(cosParallaxStereo1,cosParallaxStereo2);
// Step 6.4: recover the 3D point by triangulation
cv::Mat x3D;
// cosParallaxRays>0 && (bStereo1 || bStereo2 || cosParallaxRays<0.9998) means the parallax angle is usable (0.9998 corresponds to roughly 1 degree, ~1.15 deg)
// cosParallaxRays < cosParallaxStereo means the parallax between the two rays is larger than the stereo parallax
// Use triangulation when the ray parallax is large enough; otherwise, when stereo depth is available and gives the larger parallax, recover the point from stereo instead
// See https://github.com/raulmur/ORB_SLAM2/issues/345
if(cosParallaxRays<cosParallaxStereo && cosParallaxRays>0 && (bStereo1 || bStereo2 ||
(cosParallaxRays<0.9998 && mbInertial) || (cosParallaxRays<0.9998 && !mbInertial)))
{
// Linear Triangulation Method
// See the Triangulate function in Initializer.cc; the implementation is the same, except that the projection matrices are replaced by the [R|t] transform matrices
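// Descriptive note on the DLT solved here: for each view, the unprojected observation x = (x, y, 1) (up to scale)
// and the 3x4 pose T satisfy x × (T·X) = 0, which contributes the two rows x·T.row(2) − T.row(0) and
// y·T.row(2) − T.row(1). Stacking both views gives the 4x4 homogeneous system A·X = 0, whose solution is the
// right singular vector of A with the smallest singular value, i.e. the last row of vt below.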
cv::Mat A(4,4,CV_32F);
A.row(0) = xn1.at<float>(0)*Tcw1.row(2)-Tcw1.row(0);
A.row(1) = xn1.at<float>(1)*Tcw1.row(2)-Tcw1.row(1);
A.row(2) = xn2.at<float>(0)*Tcw2.row(2)-Tcw2.row(0);
A.row(3) = xn2.at<float>(1)*Tcw2.row(2)-Tcw2.row(1);
cv::Mat w,u,vt;
cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A| cv::SVD::FULL_UV);
x3D = vt.row(3).t();
// Check before normalization (the homogeneous scale must be non-zero)
if(x3D.at<float>(3)==0)
continue;
// Normalize the homogeneous coordinates and keep the first three components as the Euclidean point
// Euclidean coordinates
x3D = x3D.rowRange(0,3)/x3D.at<float>(3);
}
else if(bStereo1 && cosParallaxStereo1<cosParallaxStereo2)
{
// In the stereo case, recover the point from the stereo observation with the larger parallax, by directly unprojecting with the known depth
x3D = mpCurrentKeyFrame->UnprojectStereo(idx1);
}
else if(bStereo2 && cosParallaxStereo2<cosParallaxStereo1)
{
x3D = pKF2->UnprojectStereo(idx2);
}
else
{
continue; //No stereo and very low parallax
}
// Transpose to a row vector for convenience in the following computations
cv::Mat x3Dt = x3D.t();
if(x3Dt.empty()) continue;
//Check triangulation in front of cameras
// Step 6.5: check that the new 3D point is in front of both cameras; otherwise discard it
float z1 = Rcw1.row(2).dot(x3Dt)+tcw1.at<float>(2);
if(z1<=0)
continue;
float z2 = Rcw2.row(2).dot(x3Dt)+tcw2.at<float>(2);
if(z2<=0)
continue;
//Check reprojection error in first keyframe
// Step 6.6: compute the reprojection error of the 3D point in the current keyframe
const float &sigmaSquare1 = mpCurrentKeyFrame->mvLevelSigma2[kp1.octave];
const float x1 = Rcw1.row(0).dot(x3Dt)+tcw1.at<float>(0);
const float y1 = Rcw1.row(1).dot(x3Dt)+tcw1.at<float>(1);
const float invz1 = 1.0/z1;
if(!bStereo1)
{
// Monocular observation
cv::Point2f uv1 = pCamera1->project(cv::Point3f(x1,y1,z1));
float errX1 = uv1.x - kp1.pt.x;
float errY1 = uv1.y - kp1.pt.y;
// Assuming a one-pixel measurement standard deviation, the chi-squared threshold with 2 DoF (95%) is 5.991
if((errX1*errX1+errY1*errY1)>5.991*sigmaSquare1)
continue;
}
else
{
// Stereo observation
float u1 = fx1*x1*invz1+cx1;
// Compute the expected right-image coordinate from the disparity formula u_r = u - bf/z
float u1_r = u1 - mpCurrentKeyFrame->mbf*invz1;
float v1 = fy1*y1*invz1+cy1;
float errX1 = u1 - kp1.pt.x;
float errY1 = v1 - kp1.pt.y;
float errX1_r = u1_r - kp1_ur;
// With 3 DoF the chi-squared threshold is 7.8 (approximately the 95% value of 7.815)
if((errX1*errX1+errY1*errY1+errX1_r*errX1_r)>7.8*sigmaSquare1)
continue;
}
//Check reprojection error in second keyframe
// Compute the reprojection error of the 3D point in the other keyframe; same procedure as above
const float sigmaSquare2 = pKF2->mvLevelSigma2[kp2.octave];
const float x2 = Rcw2.row(0).dot(x3Dt)+tcw2.at<float>(0);
const float y2 = Rcw2.row(1).dot(x3Dt)+tcw2.at<float>(1);
const float invz2 = 1.0/z2;
if(!bStereo2)
{
cv::Point2f uv2 = pCamera2->project(cv::Point3f(x2,y2,z2));
float errX2 = uv2.x - kp2.pt.x;
float errY2 = uv2.y - kp2.pt.y;
if((errX2*errX2+errY2*errY2)>5.991*sigmaSquare2)
continue;
}
else
{
float u2 = fx2*x2*invz2+cx2;
float u2_r = u2 - mpCurrentKeyFrame->mbf*invz2;
float v2 = fy2*y2*invz2+cy2;
float errX2 = u2 - kp2.pt.x;
float errY2 = v2 - kp2.pt.y;
float errX2_r = u2_r - kp2_ur;
if((errX2*errX2+errY2*errY2+errX2_r*errX2_r)>7.8*sigmaSquare2)
continue;
}
//Check scale consistency
// Step 6.7: check scale consistency
// Vectors from each camera centre to the 3D point, in the world frame
cv::Mat normal1 = x3D-Ow1;
float dist1 = cv::norm(normal1);
cv::Mat normal2 = x3D-Ow2;
float dist2 = cv::norm(normal2);
if(dist1==0 || dist2==0)
continue;
if(mbFarPoints && (dist1>=mThFarPoints||dist2>=mThFarPoints)) // MODIFICATION
continue;
// ratioDist is the distance ratio without taking the pyramid scale into account
const float ratioDist = dist2/dist1;
// Ratio of the pyramid scale factors
const float ratioOctave = mpCurrentKeyFrame->mvScaleFactors[kp1.octave]/pKF2->mvScaleFactors[kp2.octave];
// The distance ratio and the pyramid scale ratio should not differ too much; otherwise skip this match
if(ratioDist*ratioFactor<ratioOctave || ratioDist>ratioOctave*ratioFactor)
continue;
// Triangulation is successful
// Step 6.8: triangulation succeeded, construct the MapPoint
MapPoint* pMP = new MapPoint(x3D,mpCurrentKeyFrame,mpAtlas->GetCurrentMap());
// Step 6.9: set the attributes of this MapPoint
// a. keyframes observing this MapPoint
pMP->AddObservation(mpCurrentKeyFrame,idx1);
pMP->AddObservation(pKF2,idx2);
mpCurrentKeyFrame->AddMapPoint(pMP,idx1);
pKF2->AddMapPoint(pMP,idx2);
// b. descriptor of this MapPoint
pMP->ComputeDistinctiveDescriptors();
// c. mean viewing direction and depth range of this MapPoint
pMP->UpdateNormalAndDepth();
mpAtlas->AddMapPoint(pMP);
// Step 6.10: put the newly created point into the check queue
// These MapPoints will all be examined by MapPointCulling
mlpRecentAddedMapPoints.push_back(pMP);
}
}
}
/**
* @brief Find and fuse duplicated MapPoints between the current keyframe and its (first- and second-level) neighbouring keyframes
*
*/
void LocalMapping::SearchInNeighbors()
{
// Retrieve neighbor keyframes
// Step 1: get the nn neighbouring keyframes with the highest covisibility weights for the current keyframe
// A few terms used below:
// keyframes adjacent to the current keyframe are called first-level neighbours (the neighbours)
// keyframes adjacent to a first-level neighbour are called second-level neighbours (the neighbours' neighbours)
// stereo/RGB-D uses 10 neighbouring keyframes, monocular uses 20
int nn = 10;
if(mbMonocular)
nn=20;
// Keyframes adjacent to the current keyframe, i.e. the first-level neighbours
const vector<KeyFrame*> vpNeighKFs = mpCurrentKeyFrame->GetBestCovisibilityKeyFrames(nn);
// Step 2: collect the first-level neighbours and their second-level neighbours
vector<KeyFrame*> vpTargetKFs;
// Iterate over all candidate first-level keyframes:
for(vector<KeyFrame*>::const_iterator vit=vpNeighKFs.begin(), vend=vpNeighKFs.end(); vit!=vend; vit++)
{
KeyFrame* pKFi = *vit;
// Skip keyframes that are bad or have already been used as a fuse target for the current keyframe
if(pKFi->isBad() || pKFi->mnFuseTargetForKF == mpCurrentKeyFrame->mnId)
continue;
// Add the first-level neighbour
vpTargetKFs.push_back(pKFi);
// Mark it as added
pKFi->mnFuseTargetForKF = mpCurrentKeyFrame->mnId;
}
// Add some covisible of covisible
// Extend to some second neighbors if abort is not requested
// Take the 20 best-covisible keyframes of each first-level neighbour as second-level neighbours
for(int i=0, imax=vpTargetKFs.size(); i<imax; i++)
{
const vector<KeyFrame*> vpSecondNeighKFs = vpTargetKFs[i]->GetBestCovisibilityKeyFrames(20);
// Iterate over the second-level neighbours obtained
for(vector<KeyFrame*>::const_iterator vit2=vpSecondNeighKFs.begin(), vend2=vpSecondNeighKFs.end(); vit2!=vend2; vit2++)
{
KeyFrame* pKFi2 = *vit2;
// The second-level neighbour must not have been used as a fuse target already, and must not be the current keyframe itself
if(pKFi2->isBad() || pKFi2->mnFuseTargetForKF==mpCurrentKeyFrame->mnId || pKFi2->mnId==mpCurrentKeyFrame->mnId)
continue;
// Store the second-level neighbour
vpTargetKFs.push_back(pKFi2);
pKFi2->mnFuseTargetForKF=mpCurrentKeyFrame->mnId;
}
if (mbAbortBA)
break;
}
// Extend to temporal neighbors
if(mbInertial)
{
KeyFrame* pKFi = mpCurrentKeyFrame->mPrevKF;
while(vpTargetKFs.size()<20 && pKFi)
{
if(pKFi->isBad() || pKFi->mnFuseTargetForKF==mpCurrentKeyFrame->mnId)
{
pKFi = pKFi->mPrevKF;
continue;
}
vpTargetKFs.push_back(pKFi);
pKFi->mnFuseTargetForKF=mpCurrentKeyFrame->mnId;
pKFi = pKFi->mPrevKF;
}
}
// Search matches by projection from current KF in target KFs
// Use default parameters: best/second-best ratio 0.6 and orientation check enabled
ORBmatcher matcher;
// Step 3: fuse the current keyframe's map points into each first- and second-level neighbour -- forward direction
vector<MapPoint*> vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
for(vector<KeyFrame*>::iterator vit=vpTargetKFs.begin(), vend=vpTargetKFs.end(); vit!=vend; vit++)
{
KeyFrame* pKFi = *vit;
// Project the map points into the keyframe, match and fuse; the fusion strategy is:
// 1. if a projected map point matches a feature that already has an associated map point, keep the one with more observations and replace the other
// 2. if a projected map point matches a feature without an associated map point, associate the projected map point with that feature
// Note that the fusion takes effect immediately
matcher.Fuse(pKFi,vpMapPointMatches);
if(pKFi->NLeft != -1) matcher.Fuse(pKFi,vpMapPointMatches,true);
}
if (mbAbortBA)
return;
// Search matches by projection from target KFs in current KF
// Step 4: fuse the map points of the first- and second-level neighbours into the current keyframe -- reverse direction
// Container for all MapPoints of the first- and second-level neighbours that are candidates for fusion
vector<MapPoint*> vpFuseCandidates;
vpFuseCandidates.reserve(vpTargetKFs.size()*vpMapPointMatches.size());
// Step 4.1: iterate over every first- and second-level neighbour and collect their map points into vpFuseCandidates
for(vector<KeyFrame*>::iterator vitKF=vpTargetKFs.begin(), vendKF=vpTargetKFs.end(); vitKF!=vendKF; vitKF++)
{
KeyFrame* pKFi = *vitKF;
vector<MapPoint*> vpMapPointsKFi = pKFi->GetMapPointMatches();
// Iterate over all MapPoints of this neighbour, pick the ones that need fusion and add them to the set
for(vector<MapPoint*>::iterator vitMP=vpMapPointsKFi.begin(), vendMP=vpMapPointsKFi.end(); vitMP!=vendMP; vitMP++)
{
MapPoint* pMP = *vitMP;
if(!pMP)
continue;
// Skip the map point if it is bad or has already been added to vpFuseCandidates
if(pMP->isBad() || pMP->mnFuseCandidateForKF == mpCurrentKeyFrame->mnId)
continue;
// Add to the set and mark it as added
pMP->mnFuseCandidateForKF = mpCurrentKeyFrame->mnId;
vpFuseCandidates.push_back(pMP);
}
}
// Step 4.2: project and fuse the map points, exactly as in the forward pass
// The difference: the forward pass fuses the current keyframe's map points into each neighbour, while here the neighbours' map points are fused into the current keyframe
matcher.Fuse(mpCurrentKeyFrame,vpFuseCandidates);
if(mpCurrentKeyFrame->NLeft != -1) matcher.Fuse(mpCurrentKeyFrame,vpFuseCandidates,true);
// Update points
// Step 5: update the attributes of the current keyframe's map points: descriptor, depth, mean viewing direction, etc.
vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
for(size_t i=0, iend=vpMapPointMatches.size(); i<iend; i++)
{
MapPoint* pMP=vpMapPointMatches[i];
if(pMP)
{
if(!pMP->isBad())
{
// Get the best descriptor among all keyframes observing pMP
pMP->ComputeDistinctiveDescriptors();
// Update the mean viewing direction and observation distance
pMP->UpdateNormalAndDepth();
}
}
}
// Update connections in covisibility graph
// Step 6: after updating the current keyframe's MapPoints, update its connections with other keyframes
// i.e. update the covisibility graph
mpCurrentKeyFrame->UpdateConnections();
}
// Compute the fundamental matrix between two keyframes from their poses
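// Descriptive note: matched undistorted pixels x1 (in pKF1) and x2 (in pKF2) satisfy the epipolar constraint
// x1^T * F12 * x2 = 0, which is the constraint SearchForTriangulation uses in CreateNewMapPoints to reject wrong matches.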
cv::Mat LocalMapping::ComputeF12(KeyFrame *&pKF1, KeyFrame *&pKF2)
{
// First build R12 and t12 between the two keyframes
cv::Mat R1w = pKF1->GetRotation();
cv::Mat t1w = pKF1->GetTranslation();
cv::Mat R2w = pKF2->GetRotation();
cv::Mat t2w = pKF2->GetTranslation();
cv::Mat R12 = R1w*R2w.t();
cv::Mat t12 = -R1w*R2w.t()*t2w+t1w;
// Skew-symmetric matrix of t12
cv::Mat t12x = SkewSymmetricMatrix(t12);
const cv::Mat &K1 = pKF1->mpCamera->toK();
const cv::Mat &K2 = pKF2->mpCamera->toK();
// Essential Matrix: E12 = [t12]_x * R12
// Fundamental Matrix: F12 = K1^{-T} * E12 * K2^{-1}
return K1.t().inv()*t12x*R12*K2.inv();
}
// Called by an external thread to request that this thread stop working; in practice the loop-closing thread calls it to keep local mapping from adding new keyframes during global optimization
void LocalMapping::RequestStop()
{
unique_lock<mutex> lock(mMutexStop);
mbStopRequested = true;
unique_lock<mutex> lock2(mMutexNewKFs);
mbAbortBA = true;
}
// Check whether the local mapping thread should stop; Run() calls this to see whether a stop has been requested and, if so, carries it out
bool LocalMapping::Stop()
{
unique_lock<mutex> lock(mMutexStop);
// If a stop has been requested and stopping is currently allowed (mbNotStop is false), stop the thread
if(mbStopRequested && !mbNotStop)
{
mbStopped = true;
cout << "Local Mapping STOP" << endl;
return true;
}
return false;
}
// Check whether mbStopped is true, i.e. the LocalMapping thread has been stopped
bool LocalMapping::isStopped()
{
unique_lock<mutex> lock(mMutexStop);
return mbStopped;
}
// Returns true if an external thread has requested that local mapping stop
bool LocalMapping::stopRequested()
{
unique_lock<mutex> lock(mMutexStop);
return mbStopRequested;
}
// Resume local mapping and release the keyframe pointers still in the buffer
void LocalMapping::Release()
{
unique_lock<mutex> lock(mMutexStop);
unique_lock<mutex> lock2(mMutexFinish);
if(mbFinished)
return;
mbStopped = false;
mbStopRequested = false;
for(list<KeyFrame*>::iterator lit = mlNewKeyFrames.begin(), lend=mlNewKeyFrames.end(); lit!=lend; lit++)
delete *lit;
mlNewKeyFrames.clear();
cout << "Local Mapping RELEASE" << endl;
}
// Check whether accepting keyframes is currently allowed
bool LocalMapping::AcceptKeyFrames()
{
unique_lock<mutex> lock(mMutexAccept);
return mbAcceptKeyFrames;
}
// Set the "accept keyframes" status flag
void LocalMapping::SetAcceptKeyFrames(bool flag)
{
unique_lock<mutex> lock(mMutexAccept);
mbAcceptKeyFrames=flag;
}
// Set the mbNotStop flag
bool LocalMapping::SetNotStop(bool flag)
{
unique_lock<mutex> lock(mMutexStop);
if(flag && mbStopped)
return false;
mbNotStop = flag;
return true;
}
// Abort the BA
void LocalMapping::InterruptBA()
{
mbAbortBA = true;
}
/**
* @brief Check the keyframes covisible with the current keyframe and cull those whose map points are redundant in the covisibility graph
* A keyframe is considered redundant when 90% or more of its map points are observed by at least 3 other keyframes
*/
void LocalMapping::KeyFrameCulling()
{
// Check redundant keyframes (only local keyframes)
// A keyframe is considered redundant if 90% of the MapPoints it sees are seen
// in at least other 3 keyframes (in the same or finer scale)
// We only consider close stereo points
// The variables in this function are nested several levels deep; listed here:
// mpCurrentKeyFrame: the current keyframe; the function decides whether its covisible keyframes should be deleted
// pKF: one of mpCurrentKeyFrame's covisible keyframes
// vpMapPoints: all map points of pKF
// pMP: one map point in vpMapPoints
// observations: all keyframes observing pMP
// pKFi: one keyframe in observations
// scaleLeveli: pyramid level of the observation in pKFi
// scaleLevel: pyramid level of the observation in pKF
const int Nd = 21; // MODIFICATION_STEREO_IMU 20 This should be the same as the one from LIBA
mpCurrentKeyFrame->UpdateBestCovisibles();
vector<KeyFrame*> vpLocalKeyFrames = mpCurrentKeyFrame->GetVectorCovisibleKeyFrames();
float redundant_th;
if(!mbInertial)
redundant_th = 0.9;
else if (mbMonocular)
redundant_th = 0.9;
else
redundant_th = 0.5;
const bool bInitImu = mpAtlas->isImuInitialized();
int count=0;
// Compute last KF from optimizable window:
unsigned int last_ID;
if (mbInertial)
{
int count = 0;
KeyFrame* aux_KF = mpCurrentKeyFrame;
while(count<Nd && aux_KF->mPrevKF)
{
aux_KF = aux_KF->mPrevKF;
count++;
}
last_ID = aux_KF->mnId;
}
// Iterate over all covisible keyframes
for(vector<KeyFrame*>::iterator vit=vpLocalKeyFrames.begin(), vend=vpLocalKeyFrames.end(); vit!=vend; vit++)
{
count++;
KeyFrame* pKF = *vit;
if((pKF->mnId==pKF->GetMap()->GetInitKFid()) || pKF->isBad())
continue;
// Step 2: get the map points of this covisible keyframe
const vector<MapPoint*> vpMapPoints = pKF->GetMapPointMatches();
// Initial value, only used to set the observation threshold below
int nObs = 3;
// Observation threshold, default 3
const int thObs=nObs;
// Number of redundant map point observations
int nRedundantObservations=0;
int nMPs=0;
// Step 3: iterate over all map points of this covisible keyframe and check whether 90% or more of them are observed by at least 3 other keyframes at the same or finer scale
for(size_t i=0, iend=vpMapPoints.size(); i<iend; i++)
{
MapPoint* pMP = vpMapPoints[i];
if(pMP)
{
if(!pMP->isBad())
{
if(!mbMonocular)
{
// For stereo/RGB-D, only consider close map points (depth not exceeding mThDepth, typically a few tens of stereo baselines)
if(pKF->mvDepth[i]>pKF->mThDepth || pKF->mvDepth[i]<0)
continue;
}
nMPs++;
// pMP->Observations() is the total number of cameras observing this map point (a monocular observation counts 1, a stereo observation 2)
if(pMP->Observations()>thObs)
{
const int &scaleLevel = (pKF -> NLeft == -1) ? pKF->mvKeysUn[i].octave
: (i < pKF -> NLeft) ? pKF -> mvKeys[i].octave
: pKF -> mvKeysRight[i].octave;
const map<KeyFrame*, tuple<int,int>> observations = pMP->GetObservations();
int nObs=0;
// Iterate over the keyframes observing this map point
for(map<KeyFrame*, tuple<int,int>>::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
{
KeyFrame* pKFi = mit->first;
if(pKFi==pKF)
continue;
tuple<int,int> indexes = mit->second;
int leftIndex = get<0>(indexes), rightIndex = get<1>(indexes);
int scaleLeveli = -1;
if(pKFi -> NLeft == -1)
scaleLeveli = pKFi->mvKeysUn[leftIndex].octave;
else {
if (leftIndex != -1) {
scaleLeveli = pKFi->mvKeys[leftIndex].octave;
}
if (rightIndex != -1) {
int rightLevel = pKFi->mvKeysRight[rightIndex - pKFi->NLeft].octave;
scaleLeveli = (scaleLeveli == -1 || scaleLeveli > rightLevel) ? rightLevel
: scaleLeveli;
}
}
// Scale condition: why must pKFi's pyramid level be at most pKF's level + 1?
// Because observations at the same or a finer pyramid level are at least as accurate
if(scaleLeveli<=scaleLevel+1)
{
nObs++;
// Stop searching once enough (more than thObs) qualifying keyframes have been found
if(nObs>thObs)
break;
}
}
// If the map point is observed by more than thObs other keyframes, count this observation as redundant
if(nObs>thObs)
{
nRedundantObservations++;
}
}
}
}
}
// Step 4: if more than redundant_th (90%, or 50% for stereo/RGB-D inertial) of this keyframe's valid map points are redundant, delete the keyframe
if(nRedundantObservations>redundant_th*nMPs)
{
if (mbInertial)
{
if (mpAtlas->KeyFramesInMap()<=Nd)
continue;
if(pKF->mnId>(mpCurrentKeyFrame->mnId-2))
continue;
if(pKF->mPrevKF && pKF->mNextKF)
{
const float t = pKF->mNextKF->mTimeStamp-pKF->mPrevKF->mTimeStamp;
if((bInitImu && (pKF->mnId<last_ID) && t<3.) || (t<0.5))
{
pKF->mNextKF->mpImuPreintegrated->MergePrevious(pKF->mpImuPreintegrated);
pKF->mNextKF->mPrevKF = pKF->mPrevKF;
pKF->mPrevKF->mNextKF = pKF->mNextKF;
pKF->mNextKF = NULL;
pKF->mPrevKF = NULL;
pKF->SetBadFlag();
}
else if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA2() && (cv::norm(pKF->GetImuPosition()-pKF->mPrevKF->GetImuPosition())<0.02) && (t<3))
{
pKF->mNextKF->mpImuPreintegrated->MergePrevious(pKF->mpImuPreintegrated);
pKF->mNextKF->mPrevKF = pKF->mPrevKF;
pKF->mPrevKF->mNextKF = pKF->mNextKF;
pKF->mNextKF = NULL;
pKF->mPrevKF = NULL;
pKF->SetBadFlag();
}
}
}
else
{
pKF->SetBadFlag();
}
}
if((count > 20 && mbAbortBA) || count>100) // MODIFICATION originally 20 for mbabortBA check just 10 keyframes
{
break;
}
}
}
// Compute the skew-symmetric matrix of a 3D vector v
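// Descriptive note: for any 3-vector w, SkewSymmetricMatrix(v) * w equals the cross product v × w,
// which is why ComputeF12 can write the essential matrix as [t12]_x * R12.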
cv::Mat LocalMapping::SkewSymmetricMatrix(const cv::Mat &v)
{
return (cv::Mat_<float>(3,3) << 0, -v.at<float>(2), v.at<float>(1),
v.at<float>(2), 0,-v.at<float>(0),
-v.at<float>(1), v.at<float>(0), 0);
}
// Request a reset of this thread; called by an external thread and blocks until the reset is done
void LocalMapping::RequestReset()
{
{
unique_lock<mutex> lock(mMutexReset);
cout << "LM: Map reset recieved" << endl;
mbResetRequested = true;
}
cout << "LM: Map reset, waiting..." << endl;
// Wait until the local mapping thread has handled the reset before returning
while(1)
{
{
unique_lock<mutex> lock2(mMutexReset);
if(!mbResetRequested)
break;
}
usleep(3000);
}
cout << "LM: Map reset, Done!!!" << endl;
}
void LocalMapping::RequestResetActiveMap(Map* pMap)
{
{
unique_lock<mutex> lock(mMutexReset);
cout << "LM: Active map reset recieved" << endl;
mbResetRequestedActiveMap = true;
mpMapToReset = pMap;
}
cout << "LM: Active map reset, waiting..." << endl;
while(1)
{
{
unique_lock<mutex> lock2(mMutexReset);
if(!mbResetRequestedActiveMap)
break;
}
usleep(3000);
}
cout << "LM: Active map reset, Done!!!" << endl;
}
// Check whether a reset has been requested and, if so, perform it
void LocalMapping::ResetIfRequested()
{
bool executed_reset = false;
{
unique_lock<mutex> lock(mMutexReset);
// Perform the reset: clear the keyframe buffer and the buffer of recently added map points awaiting culling
if(mbResetRequested)
{
executed_reset = true;
cout << "LM: Reseting Atlas in Local Mapping..." << endl;
mlNewKeyFrames.clear();
mlpRecentAddedMapPoints.clear();
// Set back to false to indicate the reset has completed
mbResetRequested=false;
mbResetRequestedActiveMap = false;
// Inertial parameters
mTinit = 0.f;
mbNotBA2 = true;
mbNotBA1 = true;
mbBadImu=false;
mIdxInit=0;
cout << "LM: End reseting Local Mapping..." << endl;
}
if(mbResetRequestedActiveMap) {
executed_reset = true;
cout << "LM: Reseting current map in Local Mapping..." << endl;
mlNewKeyFrames.clear();
mlpRecentAddedMapPoints.clear();
// Inertial parameters
mTinit = 0.f;
mbNotBA2 = true;
mbNotBA1 = true;
mbBadImu=false;
mbResetRequestedActiveMap = false;
cout << "LM: End reseting Local Mapping..." << endl;
}
}
if(executed_reset)
cout << "LM: Reset free the mutex" << endl;
}
// Request that this thread finish
void LocalMapping::RequestFinish()
{
unique_lock<mutex> lock(mMutexFinish);
mbFinishRequested = true;
}
// Check whether an external thread has requested that this thread finish
bool LocalMapping::CheckFinish()
{
unique_lock<mutex> lock(mMutexFinish);
return mbFinishRequested;
}
// Mark that this thread has actually finished
void LocalMapping::SetFinish()
{
unique_lock<mutex> lock(mMutexFinish);
mbFinished = true;
unique_lock<mutex> lock2(mMutexStop);
mbStopped = true;
}
// Whether this thread's Run() function has finished
bool LocalMapping::isFinished()
{
unique_lock<mutex> lock(mMutexFinish);
return mbFinished;
}
void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA)
{
if (mbResetRequested)
return;
float minTime;
int nMinKF;
if (mbMonocular)
{
minTime = 2.0;
nMinKF = 10;
}
else
{
minTime = 1.0;
nMinKF = 10;
}
if(mpAtlas->KeyFramesInMap()<nMinKF)
return;
// Retrieve all keyframes in temporal order
list<KeyFrame*> lpKF;
KeyFrame* pKF = mpCurrentKeyFrame;
while(pKF->mPrevKF)
{
lpKF.push_front(pKF);
pKF = pKF->mPrevKF;
}
lpKF.push_front(pKF);
vector<KeyFrame*> vpKF(lpKF.begin(),lpKF.end());
if(vpKF.size()<nMinKF)
return;
mFirstTs=vpKF.front()->mTimeStamp;
if(mpCurrentKeyFrame->mTimeStamp-mFirstTs<minTime)
return;
bInitializing = true;
while(CheckNewKeyFrames())
{
ProcessNewKeyFrame();
vpKF.push_back(mpCurrentKeyFrame);
lpKF.push_back(mpCurrentKeyFrame);
}
const int N = vpKF.size();
IMU::Bias b(0,0,0,0,0,0);
// Compute keyframe velocities and estimate the gravity rotation mRwg
if (!mpCurrentKeyFrame->GetMap()->isImuInitialized())
{
cv::Mat cvRwg;
cv::Mat dirG = cv::Mat::zeros(3,1,CV_32F);
for(vector<KeyFrame*>::iterator itKF = vpKF.begin(); itKF!=vpKF.end(); itKF++)
{
if (!(*itKF)->mpImuPreintegrated)
continue;
if (!(*itKF)->mPrevKF)
continue;
dirG -= (*itKF)->mPrevKF->GetImuRotation()*(*itKF)->mpImuPreintegrated->GetUpdatedDeltaVelocity();
cv::Mat _vel = ((*itKF)->GetImuPosition() - (*itKF)->mPrevKF->GetImuPosition())/(*itKF)->mpImuPreintegrated->dT;
(*itKF)->SetVelocity(_vel);
(*itKF)->mPrevKF->SetVelocity(_vel);
}
dirG = dirG/cv::norm(dirG);
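// Descriptive note: dirG is now a unit vector estimating the gravity direction in the world frame
// (it accumulates -R_wb * ΔV over the preintegrated segments; for roughly constant velocity R_wb * ΔV ≈ -g·Δt,
// so -R_wb * ΔV points along gravity). The lines below build mRwg as the rotation aligning gI = (0, 0, -1)
// with dirG, via the axis v = gI × dirG and angle acos(gI · dirG) through the exponential map ExpSO3.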
cv::Mat gI = (cv::Mat_<float>(3,1) << 0.0f, 0.0f, -1.0f);
cv::Mat v = gI.cross(dirG);
const float nv = cv::norm(v);
const float cosg = gI.dot(dirG);
const float ang = acos(cosg);
cv::Mat vzg = v*ang/nv;
cvRwg = IMU::ExpSO3(vzg);
mRwg = Converter::toMatrix3d(cvRwg);
mTinit = mpCurrentKeyFrame->mTimeStamp-mFirstTs;
}
else
{
mRwg = Eigen::Matrix3d::Identity();
mbg = Converter::toVector3d(mpCurrentKeyFrame->GetGyroBias());
mba = Converter::toVector3d(mpCurrentKeyFrame->GetAccBias());
}
mScale=1.0;
mInitTime = mpTracker->mLastFrame.mTimeStamp-vpKF.front()->mTimeStamp;
std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now();
Optimizer::InertialOptimization(mpAtlas->GetCurrentMap(), mRwg, mScale, mbg, mba, mbMonocular, infoInertial, false, false, priorG, priorA);
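// Descriptive note: this inertial-only optimization refines the gravity rotation mRwg, the scale mScale and the
// gyro/accelerometer biases mbg/mba (plus keyframe velocities inside the optimizer) while keeping the visual
// structure fixed; priorG and priorA weight the bias priors (both are 0 when this is called for VIBA 2).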
std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
/*cout << "scale after inertial-only optimization: " << mScale << endl;
cout << "bg after inertial-only optimization: " << mbg << endl;
cout << "ba after inertial-only optimization: " << mba << endl;*/
if (mScale<1e-1)
{
cout << "scale too small" << endl;
bInitializing=false;
return;
}
// Before this line we are not changing the map
unique_lock<mutex> lock(mpAtlas->GetCurrentMap()->mMutexMapUpdate);
std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
if ((fabs(mScale-1.f)>0.00001)||!mbMonocular)
{
mpAtlas->GetCurrentMap()->ApplyScaledRotation(Converter::toCvMat(mRwg).t(),mScale,true);
mpTracker->UpdateFrameIMU(mScale,vpKF[0]->GetImuBias(),mpCurrentKeyFrame);
}
std::chrono::steady_clock::time_point t3 = std::chrono::steady_clock::now();
// Check if initialization OK
if (!mpAtlas->isImuInitialized())
for(int i=0;i<N;i++)
{
KeyFrame* pKF2 = vpKF[i];
pKF2->bImu = true;
}
/*cout << "Before GIBA: " << endl;
cout << "ba: " << mpCurrentKeyFrame->GetAccBias() << endl;
cout << "bg: " << mpCurrentKeyFrame->GetGyroBias() << endl;*/
std::chrono::steady_clock::time_point t4 = std::chrono::steady_clock::now();
if (bFIBA)
{
if (priorA!=0.f)
Optimizer::FullInertialBA(mpAtlas->GetCurrentMap(), 100, false, 0, NULL, true, priorG, priorA);
else
Optimizer::FullInertialBA(mpAtlas->GetCurrentMap(), 100, false, 0, NULL, false);
}
std::chrono::steady_clock::time_point t5 = std::chrono::steady_clock::now();
// If initialization is OK
mpTracker->UpdateFrameIMU(1.0,vpKF[0]->GetImuBias(),mpCurrentKeyFrame);
if (!mpAtlas->isImuInitialized())
{
cout << "IMU in Map " << mpAtlas->GetCurrentMap()->GetId() << " is initialized" << endl;
mpAtlas->SetImuInitialized();
mpTracker->t0IMU = mpTracker->mCurrentFrame.mTimeStamp;
mpCurrentKeyFrame->bImu = true;
}
mbNewInit=true;
mnKFs=vpKF.size();
mIdxInit++;
for(list<KeyFrame*>::iterator lit = mlNewKeyFrames.begin(), lend=mlNewKeyFrames.end(); lit!=lend; lit++)
{
(*lit)->SetBadFlag();
delete *lit;
}
mlNewKeyFrames.clear();
mpTracker->mState=Tracking::OK;
bInitializing = false;
/*cout << "After GIBA: " << endl;
cout << "ba: " << mpCurrentKeyFrame->GetAccBias() << endl;
cout << "bg: " << mpCurrentKeyFrame->GetGyroBias() << endl;
double t_inertial_only = std::chrono::duration_cast<std::chrono::duration<double> >(t1 - t0).count();
double t_update = std::chrono::duration_cast<std::chrono::duration<double> >(t3 - t2).count();
double t_viba = std::chrono::duration_cast<std::chrono::duration<double> >(t5 - t4).count();
cout << t_inertial_only << ", " << t_update << ", " << t_viba << endl;*/
mpCurrentKeyFrame->GetMap()->IncreaseChangeIndex();
return;
}
void LocalMapping::ScaleRefinement()
{
// Minimum number of keyframes to compute a solution
// Minimum time (seconds) between first and last keyframe to compute a solution. Make the difference between monocular and stereo
// unique_lock<mutex> lock0(mMutexImuInit);
if (mbResetRequested)
return;
// Retrieve all keyframes in temporal order
list<KeyFrame*> lpKF;
KeyFrame* pKF = mpCurrentKeyFrame;
while(pKF->mPrevKF)
{
lpKF.push_front(pKF);
pKF = pKF->mPrevKF;
}
lpKF.push_front(pKF);
vector<KeyFrame*> vpKF(lpKF.begin(),lpKF.end());
while(CheckNewKeyFrames())
{
ProcessNewKeyFrame();
vpKF.push_back(mpCurrentKeyFrame);
lpKF.push_back(mpCurrentKeyFrame);
}
const int N = vpKF.size();
mRwg = Eigen::Matrix3d::Identity();
mScale=1.0;
std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now();
Optimizer::InertialOptimization(mpAtlas->GetCurrentMap(), mRwg, mScale);
std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
if (mScale<1e-1) // 1e-1
{
cout << "scale too small" << endl;
bInitializing=false;
return;
}
// Before this line we are not changing the map
unique_lock<mutex> lock(mpAtlas->GetCurrentMap()->mMutexMapUpdate);
std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
if ((fabs(mScale-1.f)>0.00001)||!mbMonocular)
{
mpAtlas->GetCurrentMap()->ApplyScaledRotation(Converter::toCvMat(mRwg).t(),mScale,true);
mpTracker->UpdateFrameIMU(mScale,mpCurrentKeyFrame->GetImuBias(),mpCurrentKeyFrame);
}
std::chrono::steady_clock::time_point t3 = std::chrono::steady_clock::now();
for(list<KeyFrame*>::iterator lit = mlNewKeyFrames.begin(), lend=mlNewKeyFrames.end(); lit!=lend; lit++)
{
(*lit)->SetBadFlag();
delete *lit;
}
mlNewKeyFrames.clear();
double t_inertial_only = std::chrono::duration_cast<std::chrono::duration<double> >(t1 - t0).count();
// To perform pose-inertial opt w.r.t. last keyframe
mpCurrentKeyFrame->GetMap()->IncreaseChangeIndex();
return;
}
bool LocalMapping::IsInitializing()
{
return bInitializing;
}
double LocalMapping::GetCurrKFTime()
{
if (mpCurrentKeyFrame)
{
return mpCurrentKeyFrame->mTimeStamp;
}
else
return 0.0;
}
KeyFrame* LocalMapping::GetCurrKF()
{
return mpCurrentKeyFrame;
}
} //namespace ORB_SLAM