
mono_inertial_euroc.cc

We walk through the code starting from the main function; `##` marks source code excerpts.

First, check that at least 5 arguments were passed; otherwise the input is invalid and the program returns immediately. A correct invocation looks like this:

./Examples/Monocular-Inertial/mono_inertial_euroc  ./Vocabulary/ORBvoc.txt ./Examples/Monocular-Inertial/EuRoC.yaml ${dir}/MH01 ./Examples/Monocular-Inertial/EuRoC_TimeStamps/MH01.txt dataset-MH01_monoi

##    

    if(argc < 5)
    {
        cerr << endl << "Usage: ./mono_inertial_euroc path_to_vocabulary path_to_settings path_to_sequence_folder_1 path_to_times_file_1 (path_to_image_folder_2 path_to_times_file_2 ... path_to_image_folder_N path_to_times_file_N) " << endl;
        return 1;
    }

 

Compute how many sequences will be run. Besides the program name, the vocabulary and the settings file, each sequence contributes two arguments (an image folder and a timestamps file), hence (argc-3)/2:

## const int num_seq = (argc-3)/2;
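For the example invocation above, argc is 6 (program name, vocabulary, settings file, sequence folder, timestamps file, and the trailing name dataset-MH01_monoi), so num_seq = (6 - 3) / 2 = 1 by integer division; the odd trailing argument is later picked up as the name of the output trajectory file.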

Load the image data and IMU data for each sequence:

##
for (seq = 0; seq<num_seq; seq++)
    {
        // Sequence data path, i.e. the argument ${dir}/MH01
        string pathSeq(argv[(2*seq) + 3]);
        // Image timestamp file, ./Examples/Monocular-Inertial/EuRoC_TimeStamps/MH01.txt
        string pathTimeStamps(argv[(2*seq) + 4]);
        string pathCam0 = pathSeq + "/mav0/cam0/data";
        string pathImu = pathSeq + "/mav0/imu0/data.csv";
        // Load the image file names and their timestamps into vstrImageFilenames and vTimestampsCam
        LoadImages(pathCam0, pathTimeStamps, vstrImageFilenames[seq], vTimestampsCam[seq]);
        // Load the 3-axis accelerometer and gyroscope measurements and their timestamps
        LoadIMU(pathImu, vTimestampsImu[seq], vAcc[seq], vGyro[seq]);
        // Number of images in this sequence
        nImages[seq] = vstrImageFilenames[seq].size();
        // If there are several sequences, accumulate the total number of images
        tot_images += nImages[seq];
        // Number of IMU measurements in this sequence
        nImu[seq] = vTimestampsImu[seq].size();

        // Find first imu to be considered, supposing imu measurements start first
        // i.e. for each sequence, find the first IMU timestamp greater than the first image timestamp
        // image sequence   - - - - - -
        // IMU sequence    - - - - - -    locate the first IMU stamp past the first image stamp
        while(vTimestampsImu[seq][first_imu[seq]]<=vTimestampsCam[seq][0])
            first_imu[seq]++;
        // Step back one measurement so the IMU data starts just before the first image,
        // which is convenient for the later preintegration
        first_imu[seq]--; // first imu measurement to be considered
    }
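For reference, LoadIMU just parses the EuRoC imu0/data.csv file, whose comma-separated columns are timestamp [ns], gyroscope (wx, wy, wz) and accelerometer (ax, ay, az). A condensed sketch of what it does (not a verbatim copy of the function in mono_inertial_euroc.cc; assumes the usual <fstream>/<sstream> and OpenCV headers):

void LoadIMU(const string &strImuPath, vector<double> &vTimeStamps,
             vector<cv::Point3f> &vAcc, vector<cv::Point3f> &vGyro)
{
    ifstream fImu(strImuPath.c_str());
    string line;
    while (getline(fImu, line))
    {
        if (line.empty() || line[0] == '#')   // skip the header line
            continue;

        double data[7];
        stringstream ss(line);
        string item;
        for (int i = 0; i < 7 && getline(ss, item, ','); i++)
            data[i] = stod(item);

        vTimeStamps.push_back(data[0] / 1e9);                     // ns -> s
        vGyro.push_back(cv::Point3f(data[1], data[2], data[3]));  // rad/s
        vAcc.push_back(cv::Point3f(data[4], data[5], data[6]));   // m/s^2
    }
}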

Initialize the SLAM system:

##
    // Create SLAM system. It initializes all system threads and gets ready to process frames.
    //argv[1] = ./Vocabulary/ORBvoc.txt
    //argv[2] = ./Examples/Monocular-Inertial/EuRoC.yaml
    ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::IMU_MONOCULAR, true);

Now step into the ORB_SLAM3::System constructor used above; only the most relevant parts are shown.

##
/**
 * @brief System constructor; it also starts the other threads
 * @param strVocFile      vocabulary file path, ./Vocabulary/ORBvoc.txt
 * @param strSettingsFile settings file path, ./Examples/Monocular-Inertial/EuRoC.yaml
 * @param sensor          sensor type
 * @param bUseViewer      whether to launch the viewer, default true
 * @param initFr          id of the initial frame, default 0
 * @param strSequence     sequence name, used by the tracking and local mapping threads, default std::string()
 */
System::System(const string &strVocFile, const string &strSettingsFile, const eSensor sensor,
               const bool bUseViewer, const int initFr, const string &strSequence):
    mSensor(sensor), mpViewer(static_cast<Viewer*>(NULL)), mbReset(false), mbResetActiveMap(false),
    mbActivateLocalizationMode(false), mbDeactivateLocalizationMode(false), mbShutDown(false)
{
    // Read the settings file, strSettingsFile = ./Examples/Monocular-Inertial/EuRoC.yaml
    cv::FileStorage fsSettings(strSettingsFile.c_str(), cv::FileStorage::READ);
    // Check the settings file version; different versions are handled differently
    cv::FileNode node = fsSettings["File.version"];
    if(!node.empty() && node.isString() && node.string() == "1.0")
    {
        settings_ = new Settings(strSettingsFile,mSensor); // step into this function, TODO1

        // Names for saving/loading the map; not present in the default settings file,
        // they can be added later for localization-only use
        mStrLoadAtlasFromFile = settings_->atlasLoadFile();
        mStrSaveAtlasToFile = settings_->atlasSaveFile();
    }
    // Multi-map management is new in ORB-SLAM3; this flag marks whether an Atlas was loaded
    bool loadedAtlas = false;

    if(mStrLoadAtlasFromFile.empty())
    {
        // Create a new ORB vocabulary
        mpVocabulary = new ORBVocabulary();
        // Load the pre-trained ORB vocabulary
        bool bVocLoad = mpVocabulary->loadFromTextFile(strVocFile);
        // Create the keyframe database
        mpKeyFrameDatabase = new KeyFrameDatabase(*mpVocabulary);
        // Create the multi-map Atlas; the argument 0 is the id of the initial keyframe
        mpAtlas = new Atlas(0); // step into this function, TODO2
    }
    // For sensor types that include an IMU, set the mbIsInertial flag to true
    if (mSensor==IMU_STEREO || mSensor==IMU_MONOCULAR || mSensor==IMU_RGBD)
        mpAtlas->SetInertialSensor();

    // Create the tracking thread (it runs in the main thread)
    mpTracker = new Tracking(this, mpVocabulary, mpFrameDrawer, mpMapDrawer,
                             mpAtlas, mpKeyFrameDatabase, strSettingsFile, mSensor, settings_, strSequence); // step into this function, TODO3
    // Create and start the local mapping thread
    mpLocalMapper = new LocalMapping(this, mpAtlas, mSensor==MONOCULAR || mSensor==IMU_MONOCULAR,
                                     mSensor==IMU_MONOCULAR || mSensor==IMU_STEREO || mSensor==IMU_RGBD, strSequence); // analyzed later
    mptLocalMapping = new thread(&ORB_SLAM3::LocalMapping::Run,mpLocalMapper);
    mpLocalMapper->mInitFr = initFr; // id of the initial frame, default 0

    // Maximum allowed depth of a 3D map point; beyond this threshold the triangulation
    // is likely inaccurate and the point is discarded
    if(settings_)
        mpLocalMapper->mThFarPoints = settings_->thFarPoints(); // 20
    else
        mpLocalMapper->mThFarPoints = fsSettings["thFarPoints"];

    // Create and start the loop closing thread
    mpLoopCloser = new LoopClosing(mpAtlas, mpKeyFrameDatabase, mpVocabulary, mSensor!=MONOCULAR, activeLC); // analyzed later
    mptLoopClosing = new thread(&ORB_SLAM3::LoopClosing::Run, mpLoopCloser);

    // Wire up the pointers between the threads
    mpTracker->SetLocalMapper(mpLocalMapper);
    mpTracker->SetLoopClosing(mpLoopCloser);

    mpLocalMapper->SetTracker(mpTracker);
    mpLocalMapper->SetLoopCloser(mpLoopCloser);

    mpLoopCloser->SetTracker(mpTracker);
    mpLoopCloser->SetLocalMapper(mpLocalMapper);
}

Step into the Settings constructor (TODO1):

##
// configFile = ./Examples/Monocular-Inertial/EuRoC.yaml
Settings::Settings(const std::string &configFile, const int &sensor) : bNeedToUndistort_(false), bNeedToRectify_(false), bNeedToResize1_(false), bNeedToResize2_(false)
{
    // Open settings file ./Examples/Monocular-Inertial/EuRoC.yaml
    cv::FileStorage fSettings(configFile, cv::FileStorage::READ);

    // Read intrinsics and distortion parameters of the first camera
    readCamera1(fSettings); // step into this function, TODO1-1

    // For stereo setups, also read the second camera
    if (sensor_ == System::STEREO || sensor_ == System::IMU_STEREO)
    {
        readCamera2(fSettings);
    }

    // Read the image information
    readImageInfo(fSettings); // step into this function, TODO1-2

    // Read the IMU parameters
    readIMU(fSettings);

    if (sensor_ == System::RGBD || sensor_ == System::IMU_RGBD)
    {
        // Read the RGB-D parameters
        readRGBD(fSettings); // step into this function, TODO1-3
    }

    // Read the ORB feature extraction parameters
    readORB(fSettings);

    if (bNeedToRectify_)
    {
        precomputeRectificationMaps();
    }
}

readCamera1(fSettings); // step into this function, TODO1-1

void Settings::readCamera1(cv::FileStorage &fSettings)
{
    // Camera model type
    string cameraModel = readParameter<string>(fSettings, "Camera.type", found);

    vector<float> vCalibration;
    // Pinhole camera model
    if (cameraModel == "PinHole")
    {
        cameraType_ = PinHole;
        // Camera intrinsics
        float fx = readParameter<float>(fSettings, "Camera1.fx", found);
        float fy = readParameter<float>(fSettings, "Camera1.fy", found);
        float cx = readParameter<float>(fSettings, "Camera1.cx", found);
        float cy = readParameter<float>(fSettings, "Camera1.cy", found);
        vCalibration = {fx, fy, cx, cy};
        // Build the pinhole camera model
        calibration1_ = new Pinhole(vCalibration);
        // Read the distortion parameters
        vPinHoleDistorsion1_[0] = readParameter<float>(fSettings, "Camera1.k1", found);
        vPinHoleDistorsion1_[1] = readParameter<float>(fSettings, "Camera1.k2", found);
        vPinHoleDistorsion1_[2] = readParameter<float>(fSettings, "Camera1.p1", found);
        vPinHoleDistorsion1_[3] = readParameter<float>(fSettings, "Camera1.p2", found);

    }
    else if (cameraModel == "KannalaBrandt8") // fisheye / wide-angle lens model
    {
        cameraType_ = KannalaBrandt;

        // Read intrinsic parameters
        float fx = readParameter<float>(fSettings, "Camera1.fx", found);
        float fy = readParameter<float>(fSettings, "Camera1.fy", found);
        float cx = readParameter<float>(fSettings, "Camera1.cx", found);
        float cy = readParameter<float>(fSettings, "Camera1.cy", found);

        float k0 = readParameter<float>(fSettings, "Camera1.k1", found);
        float k1 = readParameter<float>(fSettings, "Camera1.k2", found);
        float k2 = readParameter<float>(fSettings, "Camera1.k3", found);
        float k3 = readParameter<float>(fSettings, "Camera1.k4", found);

        vCalibration = {fx, fy, cx, cy, k0, k1, k2, k3};

        calibration1_ = new KannalaBrandt8(vCalibration);
        originalCalib1_ = new KannalaBrandt8(vCalibration);

        if (sensor_ == System::STEREO || sensor_ == System::IMU_STEREO)
        {
            int colBegin = readParameter<int>(fSettings, "Camera1.overlappingBegin", found);
            int colEnd = readParameter<int>(fSettings, "Camera1.overlappingEnd", found);
            vector<int> vOverlapping = {colBegin, colEnd}; // record the begin and end columns of the overlapping area

            static_cast<KannalaBrandt8 *>(calibration1_)->mvLappingArea = vOverlapping;
        }
    }
    
}
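readParameter is a small helper template defined in Settings.h; a minimal sketch of its behavior, reconstructed from the call sites above (the exact error handling in the real header may differ):

template <typename T>
T readParameter(cv::FileStorage &fSettings, const std::string &name,
                bool &found, const bool required = true)
{
    cv::FileNode node = fSettings[name];
    if (node.empty())
    {
        if (required)
        {
            // A required parameter that is missing is a fatal configuration error
            std::cerr << name << " parameter does not exist" << std::endl;
            exit(-1);
        }
        found = false;
        return T();     // optional parameter missing: return a default value
    }
    found = true;
    return (T)node;     // cv::FileNode converts to int/float/string
}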

readImageInfo(fSettings); // step into this function, TODO1-2

void Settings::readImageInfo(cv::FileStorage &fSettings){
    bool found;
    // Read the original camera resolution
    int originalRows = readParameter<int>(fSettings, "Camera.height", found);
    int originalCols = readParameter<int>(fSettings, "Camera.width", found);
    originalImSize_.width = originalCols;
    originalImSize_.height = originalRows;
    // originalImSize_ / newImSize_ are of type cv::Size
    newImSize_ = originalImSize_;
    // Read the new image height, if any
    int newHeigh = readParameter<int>(fSettings, "Camera.newHeight", found, false);
    // Only executed if the parameter exists
    if (found)
    {
        // The images need to be resized
        bNeedToResize1_ = true;
        newImSize_.height = newHeigh;
        if (!bNeedToRectify_)
        {
            // Update calibration
            // New height divided by the original height; e.g. original height 4 and new height 2 gives scaleRowFactor = 0.5
            float scaleRowFactor = (float)newImSize_.height / (float)originalImSize_.height;
            // Scale fy and cy accordingly. The intrinsics map a camera-frame point to the image plane,
            // v = fy * Y/Z + cy, so the v (row) direction scales uniformly with the new image height
            calibration1_->setParameter(calibration1_->getParameter(1) * scaleRowFactor, 1);
            calibration1_->setParameter(calibration1_->getParameter(3) * scaleRowFactor, 3);

            if ((sensor_ == System::STEREO || sensor_ == System::IMU_STEREO) && cameraType_ != Rectified)
            {
                calibration2_->setParameter(calibration2_->getParameter(1) * scaleRowFactor, 1);
                calibration2_->setParameter(calibration2_->getParameter(3) * scaleRowFactor, 3);
            }
        }
    }
    // Read the new image width, if any
    int newWidth = readParameter<int>(fSettings, "Camera.newWidth", found, false);
    // Only executed if the parameter exists
    if (found)
    {
        bNeedToResize1_ = true;
        newImSize_.width = newWidth;
        if (!bNeedToRectify_)
        {
            // Update calibration
            float scaleColFactor = (float)newImSize_.width / (float)originalImSize_.width;
            // Scale fx and cx accordingly; the intrinsics map a camera point to the image, u = fx * X/Z + cx,
            // so the u (column) direction scales uniformly with the new image width
            calibration1_->setParameter(calibration1_->getParameter(0) * scaleColFactor, 0);
            calibration1_->setParameter(calibration1_->getParameter(2) * scaleColFactor, 2);

            if ((sensor_ == System::STEREO || sensor_ == System::IMU_STEREO) && cameraType_ != Rectified)
            {
                calibration2_->setParameter(calibration2_->getParameter(0) * scaleColFactor, 0);
                calibration2_->setParameter(calibration2_->getParameter(2) * scaleColFactor, 2);

                if (cameraType_ == KannalaBrandt)
                {
                    static_cast<KannalaBrandt8 *>(calibration1_)->mvLappingArea[0] *= scaleColFactor;
                    static_cast<KannalaBrandt8 *>(calibration1_)->mvLappingArea[1] *= scaleColFactor;

                    static_cast<KannalaBrandt8 *>(calibration2_)->mvLappingArea[0] *= scaleColFactor;
                    static_cast<KannalaBrandt8 *>(calibration2_)->mvLappingArea[1] *= scaleColFactor;
                }
            }
        }
    }
    // Camera frame rate
    fps_ = readParameter<int>(fSettings, "Camera.fps", found);
    // Whether the images are in RGB order
    bRGB_ = (bool)readParameter<int>(fSettings, "Camera.RGB", found);
}
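As a quick sanity check of the scaling above (illustrative numbers, not values from EuRoC.yaml): halving the image width halves fx and cx while fy and cy are untouched.

#include <cstdio>

int main()
{
    float fx = 458.0f, cx = 367.0f;          // assumed original intrinsics
    int originalCols = 752, newWidth = 376;  // assumed original / new widths
    float scaleColFactor = (float)newWidth / (float)originalCols;  // 0.5
    // u = fx * X/Z + cx is linear in u, so both terms scale by the same factor
    fx *= scaleColFactor;
    cx *= scaleColFactor;
    std::printf("fx' = %.1f, cx' = %.1f\n", fx, cx);  // prints fx' = 229.0, cx' = 183.5
    return 0;
}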

readRGBD(fSettings); // step into this function, TODO1-3

void Settings::readRGBD(cv::FileStorage &fSettings)
{
    bool found;

    depthMapFactor_ = readParameter<float>(fSettings, "RGBD.DepthMapFactor", found);
    thDepth_ = readParameter<float>(fSettings, "Stereo.ThDepth", found);
    b_ = readParameter<float>(fSettings, "Stereo.b", found);
    bf_ = b_ * calibration1_->getParameter(0); // focal length times baseline. A word on disparity: d = uL - uR,
    // the difference in horizontal pixel coordinates between the left and right views; the depth of a 3D point
    // is z = f*b/d, so disparity and depth are inversely proportional
}
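A minimal illustration of that relation (the helper name is ours, it is not part of ORB-SLAM3): with bf_ = f*b known, the depth follows directly from the disparity.

// Hypothetical helper, illustration only: depth from disparity using z = f*b/d
float DepthFromDisparity(float bf, float uL, float uR)
{
    float d = uL - uR;                  // disparity in pixels
    return (d > 0.0f) ? bf / d : -1.0f; // -1 marks an invalid (non-positive) disparity
}
// e.g. with bf = 40 and d = 10 px, z = 4 m; doubling the disparity halves the depth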

mpAtlas = new Atlas(0); // step into this function, TODO2
// Initialize the map collection (Atlas)

Atlas::Atlas(int initKFid) : mnLastInitKFidMap(initKFid), mHasViewer(false)
{
    mpCurrentMap = static_cast<Map *>(NULL);
    CreateNewMap();//TODO2-1
}

CreateNewMap(); // TODO2-1

/**
 * @brief Create a new map. If the currently active map is valid, first store it as an
 * inactive map and then create the new one; otherwise just create the new map directly.
 */
void Atlas::CreateNewMap()
{
    // Lock the atlas
    unique_lock<mutex> lock(mMutexAtlas);
    // If the currently active map is valid, store it as an inactive map first
    if (mpCurrentMap)
    {
        // mnLastInitKFidMap is the id of the first keyframe of the map being created;
        // it is the largest keyframe id of the previous map plus one
        if (!mspMaps.empty() && mnLastInitKFidMap < mpCurrentMap->GetMaxKFid())
            mnLastInitKFidMap = mpCurrentMap->GetMaxKFid() + 1;
        // Store the current map, which essentially just sets its mIsInUse flag to false
        mpCurrentMap->SetStoredMap();
    }
    mpCurrentMap = new Map(mnLastInitKFidMap);  // create the new map
    mpCurrentMap->SetCurrentMap();              // mark it as the active map
    mspMaps.insert(mpCurrentMap);               // insert it into the map collection
}
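For reference, the two calls above essentially just toggle the map's in-use flag; a sketch of what they amount to (mIsInUse is the member mentioned in the comment above, the real Map methods may do slightly more):

void Map::SetCurrentMap() { mIsInUse = true;  }  // mark the map as the active one
void Map::SetStoredMap()  { mIsInUse = false; }  // park the map as inactive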

mpTracker = new Tracking(this, mpVocabulary, mpFrameDrawer, mpMapDrawer,
                             mpAtlas, mpKeyFrameDatabase, strSettingsFile, mSensor, settings_, strSequence); // step into this function, TODO3

/**
 * @brief Tracking thread constructor
 * @param pSys           pointer to the System
 * @param pVoc           vocabulary
 * @param pFrameDrawer   frame drawer
 * @param pMapDrawer     map drawer
 * @param pAtlas         atlas
 * @param pKFDB          keyframe bag-of-words database
 * @param strSettingPath path to the settings file
 * @param sensor         sensor type
 * @param settings       settings object
 * @param _strSeqName    sequence name, unused
 */
Tracking::Tracking(System *pSys, ORBVocabulary* pVoc, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer,
    Atlas *pAtlas, KeyFrameDatabase* pKFDB, const string &strSettingPath, const int sensor, Settings* settings, const string &_nameSeq)
    : mState(NO_IMAGES_YET), mSensor(sensor), mTrackedFr(0), mbStep(false),
    mbOnlyTracking(false), mbMapUpdated(false), mbVO(false), mpORBVocabulary(pVoc), mpKeyFrameDB(pKFDB),
    mbReadyToInitializate(false), mpSystem(pSys), mpViewer(NULL), bStepByStep(false),
    mpFrameDrawer(pFrameDrawer), mpMapDrawer(pMapDrawer), mpAtlas(pAtlas), mnLastRelocFrameId(0), time_recently_lost(5.0),
    mnInitialFrameId(0), mbCreatedMap(false), mnFirstFrameId(0), mpCamera2(nullptr), mpLastKeyFrame(static_cast<KeyFrame*>(NULL))
{
    // Load camera parameters from settings file
    // Step 1: load the camera parameters from the settings file
    if(settings){
        newParameterLoader(settings);
    }
    else{
        cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ);

        bool b_parse_cam = ParseCamParamFile(fSettings);
        if(!b_parse_cam)
        {
            std::cout << "*Error with the camera parameters in the config file*" << std::endl;
        }

        // Load ORB parameters
        bool b_parse_orb = ParseORBParamFile(fSettings);
        if(!b_parse_orb)
        {
            std::cout << "*Error with the ORB parameters in the config file*" << std::endl;
        }

        bool b_parse_imu = true;
        if(sensor==System::IMU_MONOCULAR || sensor==System::IMU_STEREO || sensor==System::IMU_RGBD)
        {
            b_parse_imu = ParseIMUParamFile(fSettings);
            if(!b_parse_imu)
            {
                std::cout << "*Error with the IMU parameters in the config file*" << std::endl;
            }

            mnFramesToResetIMU = mMaxFrames;
        }

        if(!b_parse_cam || !b_parse_orb || !b_parse_imu)
        {
            std::cerr << "**ERROR in the config file, the format is not correct**" << std::endl;
            try
            {
                throw -1;
            }
            catch(exception &e)
            {

            }
        }
    }

    initID = 0; lastID = 0;
    mbInitWith3KFs = false;
    mnNumDataset = 0;

    // Iterate over the cameras in the atlas and print their types
    vector<GeometricCamera*> vpCams = mpAtlas->GetAllCameras();
    std::cout << "There are " << vpCams.size() << " cameras in the atlas" << std::endl;
    for(GeometricCamera* pCam : vpCams)
    {
        std::cout << "Camera " << pCam->GetId();
        if(pCam->GetType() == GeometricCamera::CAM_PINHOLE)
        {
            std::cout << " is pinhole" << std::endl;
        }
        else if(pCam->GetType() == GeometricCamera::CAM_FISHEYE)
        {
            std::cout << " is fisheye" << std::endl;
        }
        else
        {
            std::cout << " is unknown" << std::endl;
        }
    }

#ifdef REGISTER_TIMES
    vdRectStereo_ms.clear();
    vdResizeImage_ms.clear();
    vdORBExtract_ms.clear();
    vdStereoMatch_ms.clear();
    vdIMUInteg_ms.clear();
    vdPosePred_ms.clear();
    vdLMTrack_ms.clear();
    vdNewKF_ms.clear();
    vdTrackTotal_ms.clear();
#endif
}
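With the System constructed, the rest of main() in mono_inertial_euroc.cc loads each image, collects the IMU measurements that fall between the previous and the current frame, and hands both to the tracker. A condensed sketch of that loop (variable names follow the file, but this is not verbatim; error handling and timing code are omitted):

for (int ni = 0; ni < nImages[seq]; ni++)
{
    // Load the image and its timestamp
    cv::Mat im = cv::imread(vstrImageFilenames[seq][ni], cv::IMREAD_UNCHANGED);
    double tframe = vTimestampsCam[seq][ni];

    // Gather all IMU measurements up to the current image timestamp
    vector<ORB_SLAM3::IMU::Point> vImuMeas;
    if (ni > 0)
    {
        while (vTimestampsImu[seq][first_imu[seq]] <= vTimestampsCam[seq][ni])
        {
            vImuMeas.push_back(ORB_SLAM3::IMU::Point(
                vAcc[seq][first_imu[seq]].x,  vAcc[seq][first_imu[seq]].y,  vAcc[seq][first_imu[seq]].z,
                vGyro[seq][first_imu[seq]].x, vGyro[seq][first_imu[seq]].y, vGyro[seq][first_imu[seq]].z,
                vTimestampsImu[seq][first_imu[seq]]));
            first_imu[seq]++;
        }
    }

    // Feed the image plus the IMU batch to the SLAM system
    SLAM.TrackMonocular(im, tframe, vImuMeas);
}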

 

 
