Learning LVI-SAM from Coarse to Fine (9): VIS, the visual_feature Module

1. Node Relationships

Recall the node relationship diagram:

From the node graph we can see that the feature-extraction node visual_feature subscribes to two topics: the raw image topic image_raw and the deskewed point-cloud topic cloud_deskewed. It publishes three topics: the feature-annotated image topic feature_img, the custom feature topic feature, and the feature-tracker restart flag topic restart. The feature-annotated image topic is used only for visualization.

2. The Main Function

int main(int argc, char **argv)
{
    //// initialize ROS
    ros::init(argc, argv, "vins");
    ros::NodeHandle n;
    ROS_INFO("\033[1;32m----> Visual Feature Tracker Started.\033[0m");
    //// set the console logging level
    ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Warn);
    readParameters(n);
    //// read the camera intrinsics
    for (int i = 0; i < NUM_OF_CAM; i++)
        trackerData[i].readIntrinsicParameter(CAM_NAMES[i]);
    // load fisheye mask to remove features on the boundary
    //// for fisheye cameras, apply a mask so that no features are extracted near the image boundary
    if(FISHEYE)
    {
        for (int i = 0; i < NUM_OF_CAM; i++)
        {
            trackerData[i].fisheye_mask = cv::imread(FISHEYE_MASK, 0);
            if(!trackerData[i].fisheye_mask.data)
            {
                ROS_ERROR("load fisheye mask fail");
                ROS_BREAK();
            }
            else
                ROS_INFO("load mask success");
        }
    }
    // initialize depthRegister (after readParameters())
    //// initialize the depth register
    depthRegister = new DepthRegister(n);
    // subscriber to image and lidar
    //// subscribe to the image topic and the deskewed point-cloud topic
    ros::Subscriber sub_img   = n.subscribe(IMAGE_TOPIC,       5,    img_callback);
    ros::Subscriber sub_lidar = n.subscribe(POINT_CLOUD_TOPIC, 5,    lidar_callback);
    if (!USE_LIDAR)
        sub_lidar.shutdown();
    // messages to vins estimator
    //// advertise the custom feature topic, the feature-annotated image topic, and the restart flag topic
    pub_feature = n.advertise<sensor_msgs::PointCloud>(PROJECT_NAME + "/vins/feature/feature",     5);
    pub_match   = n.advertise<sensor_msgs::Image>     (PROJECT_NAME + "/vins/feature/feature_img", 5);
    pub_restart = n.advertise<std_msgs::Bool>         (PROJECT_NAME + "/vins/feature/restart",     5);
    // two ROS spinners for parallel processing (image and lidar)
    //// the image and lidar callbacks are serviced by two threads in parallel
    ros::MultiThreadedSpinner spinner(2);
    spinner.spin();
    return 0;
}

3. Callback Functions

3.1 Image subscription callback img_callback()

void img_callback(const sensor_msgs::ImageConstPtr &img_msg)
{
    double cur_img_time = img_msg->header.stamp.toSec();
    //// for the very first image, just record its timestamps and return
    if(first_image_flag)
    {
        first_image_flag = false;
        first_image_time = cur_img_time;
        last_image_time = cur_img_time;
        return;
    }
    //// if the interval to the previous frame is abnormal, tracking is unstable: reset and reinitialize
    // detect unstable camera stream
    if (cur_img_time - last_image_time > 1.0 || cur_img_time < last_image_time)
    {
        ROS_WARN("image discontinue! reset the feature tracker!");
        first_image_flag = true; 
        last_image_time = 0;
        pub_count = 1;
        std_msgs::Bool restart_flag;
        restart_flag.data = true;
        pub_restart.publish(restart_flag);
        return;
    }
    last_image_time = cur_img_time;
    // frequency control; the default publish frequency FREQ is 20 Hz
    //// every frame is processed, but not every frame is published, because everything that is published enlarges the back-end optimization problem
    if (round(1.0 * pub_count / (cur_img_time - first_image_time)) <= FREQ)
    {
        PUB_THIS_FRAME = true;
        // reset the frequency control
        if (abs(1.0 * pub_count / (cur_img_time - first_image_time) - FREQ) < 0.01 * FREQ)
        {
            first_image_time = cur_img_time;
            pub_count = 0;
        }
    }
    else
        PUB_THIS_FRAME = false;
    //// extract the image and convert its encoding
    cv_bridge::CvImageConstPtr ptr;
    if (img_msg->encoding == "8UC1")
    {
        sensor_msgs::Image img;
        img.header = img_msg->header;
        img.height = img_msg->height;
        img.width = img_msg->width;
        img.is_bigendian = img_msg->is_bigendian;
        img.step = img_msg->step;
        img.data = img_msg->data;
        img.encoding = "mono8";
        ptr = cv_bridge::toCvCopy(img, sensor_msgs::image_encodings::MONO8);
    }
    else
        ptr = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::MONO8);
    cv::Mat show_img = ptr->image;
    TicToc t_r;
    for (int i = 0; i < NUM_OF_CAM; i++)
    {
        ROS_DEBUG("processing camera %d", i);
        //// each camera has its own tracker instance
        //// monocular: feature extraction + optical-flow tracking; stereo: the left image is handled as monocular, the right image is only equalized or stored
        if (i != 1 || !STEREO_TRACK)
            trackerData[i].readImage(ptr->image.rowRange(ROW * i, ROW * (i + 1)), cur_img_time);
        else
        {
            if (EQUALIZE)
            {
                cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
                clahe->apply(ptr->image.rowRange(ROW * i, ROW * (i + 1)), trackerData[i].cur_img);
            }
            else
                trackerData[i].cur_img = ptr->image.rowRange(ROW * i, ROW * (i + 1));
        }
        #if SHOW_UNDISTORTION
            trackerData[i].showUndistortion("undistrotion_" + std::to_string(i));
        #endif
    }
    //// update feature ids
    for (unsigned int i = 0;; i++)
    {
        bool completed = false;
        for (int j = 0; j < NUM_OF_CAM; j++)
            if (j != 1 || !STEREO_TRACK)
                completed |= trackerData[j].updateID(i);
        if (!completed)
            break;
    }
    if (PUB_THIS_FRAME)
    {
        pub_count++;
        //// the features are packed into a sensor_msgs::PointCloud; its points hold the normalized-plane coordinates, and the channels hold feature id, pixel coordinates, velocity, and depth
        sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud);
        sensor_msgs::ChannelFloat32 id_of_point;
        sensor_msgs::ChannelFloat32 u_of_point;
        sensor_msgs::ChannelFloat32 v_of_point;
        sensor_msgs::ChannelFloat32 velocity_x_of_point;
        sensor_msgs::ChannelFloat32 velocity_y_of_point;
        feature_points->header.stamp = img_msg->header.stamp;
        feature_points->header.frame_id = "vins_body";
        //// record the feature ids of each camera
        vector<set<int>> hash_ids(NUM_OF_CAM);
        for (int i = 0; i < NUM_OF_CAM; i++)
        {
            auto &un_pts = trackerData[i].cur_un_pts;
            auto &cur_pts = trackerData[i].cur_pts;
            auto &ids = trackerData[i].ids;
            auto &pts_velocity = trackerData[i].pts_velocity;
            for (unsigned int j = 0; j < ids.size(); j++)
            {
                //// only features that have been tracked more than once are published
                if (trackerData[i].track_cnt[j] > 1)
                {
                    int p_id = ids[j];
                    hash_ids[i].insert(p_id);
                    geometry_msgs::Point32 p;
                    p.x = un_pts[j].x;
                    p.y = un_pts[j].y;
                    p.z = 1;            //// point on the normalized image plane
                    feature_points->points.push_back(p);
                    id_of_point.values.push_back(p_id * NUM_OF_CAM + i);
                    u_of_point.values.push_back(cur_pts[j].x);
                    v_of_point.values.push_back(cur_pts[j].y);
                    velocity_x_of_point.values.push_back(pts_velocity[j].x);
                    velocity_y_of_point.values.push_back(pts_velocity[j].y);
                }
            }
        }
        feature_points->channels.push_back(id_of_point);
        feature_points->channels.push_back(u_of_point);
        feature_points->channels.push_back(v_of_point);
        feature_points->channels.push_back(velocity_x_of_point);
        feature_points->channels.push_back(velocity_y_of_point);
        // get feature depth from lidar point cloud
        //// take a copy of the point cloud aggregated over the last few lidar frames
        pcl::PointCloud<PointType>::Ptr depth_cloud_temp(new pcl::PointCloud<PointType>());
        mtx_lidar.lock();
        *depth_cloud_temp = *depthCloud;
        mtx_lidar.unlock();
        //// recover feature depth from the aggregated cloud
        sensor_msgs::ChannelFloat32 depth_of_points = depthRegister->get_depth(img_msg->header.stamp, show_img, depth_cloud_temp, trackerData[0].m_camera, feature_points->points);
        feature_points->channels.push_back(depth_of_points);
        // skip the first image; since no optical speed on frist image
        //// the first tracked image has only point coordinates and no velocities, so it is not published
        if (!init_pub)
        {
            init_pub = 1;
        }
        else
            pub_feature.publish(feature_points);
        // publish features in image
        if (pub_match.getNumSubscribers() != 0)
        {
            ptr = cv_bridge::cvtColor(ptr, sensor_msgs::image_encodings::RGB8);
            //cv::Mat stereo_img(ROW * NUM_OF_CAM, COL, CV_8UC3);
            cv::Mat stereo_img = ptr->image;
            for (int i = 0; i < NUM_OF_CAM; i++)
            {
                cv::Mat tmp_img = stereo_img.rowRange(i * ROW, (i + 1) * ROW);
                cv::cvtColor(show_img, tmp_img, CV_GRAY2RGB);
                for (unsigned int j = 0; j < trackerData[i].cur_pts.size(); j++)
                {
                    if (SHOW_TRACK)
                    {
                        // track count
                        //// with SHOW_TRACK set, a circle is drawn on every feature: the more often it has been tracked, the greener the circle, otherwise the bluer
                        double len = std::min(1.0, 1.0 * trackerData[i].track_cnt[j] / WINDOW_SIZE);
                        cv::circle(tmp_img, trackerData[i].cur_pts[j], 4, cv::Scalar(255 * (1 - len), 255 * len, 0), 4);
                    } else {
                        // depth
                        //// without SHOW_TRACK, features with a valid depth get a green circle and features without depth a red one
                        if(j < depth_of_points.values.size())
                        {
                            if (depth_of_points.values[j] > 0)
                                cv::circle(tmp_img, trackerData[i].cur_pts[j], 4, cv::Scalar(0, 255, 0), 4);
                            else
                                cv::circle(tmp_img, trackerData[i].cur_pts[j], 4, cv::Scalar(0, 0, 255), 4);
                        }
                    }
                }
            }
            pub_match.publish(ptr->toImageMsg());
        }
    }
}
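
To make the channel layout concrete, here is a minimal sketch of how a downstream consumer (for example the estimator) might unpack the published feature message. The callback name is hypothetical; the channel order (id, u, v, velocity_x, velocity_y, depth) follows the push_back order in the code above.

#include <sensor_msgs/PointCloud.h>

// hypothetical consumer of the PROJECT_NAME + "/vins/feature/feature" topic
void feature_callback(const sensor_msgs::PointCloudConstPtr &feature_msg)
{
    for (size_t j = 0; j < feature_msg->points.size(); j++)
    {
        double x = feature_msg->points[j].x;                  // normalized-plane x
        double y = feature_msg->points[j].y;                  // normalized-plane y (z is always 1)
        int v_id = (int)feature_msg->channels[0].values[j];   // encoded as p_id * NUM_OF_CAM + camera index
        int feature_id = v_id / NUM_OF_CAM;
        int camera_id  = v_id % NUM_OF_CAM;
        float u     = feature_msg->channels[1].values[j];     // pixel u
        float v     = feature_msg->channels[2].values[j];     // pixel v
        float vel_x = feature_msg->channels[3].values[j];     // velocity on the normalized plane
        float vel_y = feature_msg->channels[4].values[j];
        float depth = feature_msg->channels[5].values[j];     // lidar depth, -1 if unavailable
        // ... hand (feature_id, camera_id, x, y, u, v, vel_x, vel_y, depth) to the estimator
    }
}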

3.2 Point cloud subscription callback lidar_callback()

void lidar_callback(const sensor_msgs::PointCloud2ConstPtr& laser_msg)
{
    static int lidar_count = -1;
    //// by default keep only one lidar frame out of every LIDAR_SKIP+1 (skip three, take one)
    if (++lidar_count % (LIDAR_SKIP+1) != 0)
        return;
    // 0. listen to transform
    //// look up the tf transform
    static tf::TransformListener listener;
    static tf::StampedTransform transform;
    try{
        listener.waitForTransform("vins_world", "vins_body_ros", laser_msg->header.stamp, ros::Duration(0.01));
        listener.lookupTransform("vins_world", "vins_body_ros", laser_msg->header.stamp, transform);
    }
    catch (tf::TransformException ex){
        // ROS_ERROR("lidar no tf");
        return;
    }
    //// extract the pose
    double xCur, yCur, zCur, rollCur, pitchCur, yawCur;
    xCur = transform.getOrigin().x();
    yCur = transform.getOrigin().y();
    zCur = transform.getOrigin().z();
    tf::Matrix3x3 m(transform.getRotation());
    m.getRPY(rollCur, pitchCur, yawCur);
    Eigen::Affine3f transNow = pcl::getTransformation(xCur, yCur, zCur, rollCur, pitchCur, yawCur);
    // 1. convert laser cloud message to pcl
    //// convert the point cloud
    pcl::PointCloud<PointType>::Ptr laser_cloud_in(new pcl::PointCloud<PointType>());
    pcl::fromROSMsg(*laser_msg, *laser_cloud_in);
    // 2. downsample new cloud (save memory)
    //// downsample
    pcl::PointCloud<PointType>::Ptr laser_cloud_in_ds(new pcl::PointCloud<PointType>());
    static pcl::VoxelGrid<PointType> downSizeFilter;
    downSizeFilter.setLeafSize(0.2, 0.2, 0.2);
    downSizeFilter.setInputCloud(laser_cloud_in);
    downSizeFilter.filter(*laser_cloud_in_ds);
    *laser_cloud_in = *laser_cloud_in_ds;
    // 3. filter lidar points (only keep points in camera view)
    //// keep only points inside the camera FoV (|y/x| <= 10 and |z/x| <= 10, i.e. within roughly 84 degrees of the forward axis)
    pcl::PointCloud<PointType>::Ptr laser_cloud_in_filter(new pcl::PointCloud<PointType>());
    for (int i = 0; i < (int)laser_cloud_in->size(); ++i)
    {
        PointType p = laser_cloud_in->points[i];
        if (p.x >= 0 && abs(p.y / p.x) <= 10 && abs(p.z / p.x) <= 10)
            laser_cloud_in_filter->push_back(p);
    }
    *laser_cloud_in = *laser_cloud_in_filter;
    // TODO: transform to IMU body frame
    // 4. offset T_lidar -> T_camera
    //// transform from the lidar frame to the camera frame
    pcl::PointCloud<PointType>::Ptr laser_cloud_offset(new pcl::PointCloud<PointType>());
    Eigen::Affine3f transOffset = pcl::getTransformation(L_C_TX, L_C_TY, L_C_TZ, L_C_RX, L_C_RY, L_C_RZ);
    pcl::transformPointCloud(*laser_cloud_in, *laser_cloud_offset, transOffset);
    *laser_cloud_in = *laser_cloud_offset;
    // 5. transform new cloud into global odom frame
    //// transform from the camera frame to the global frame
    pcl::PointCloud<PointType>::Ptr laser_cloud_global(new pcl::PointCloud<PointType>());
    pcl::transformPointCloud(*laser_cloud_in, *laser_cloud_global, transNow);
    // 6. save new cloud
    //// push into the queue
    double timeScanCur = laser_msg->header.stamp.toSec();
    cloudQueue.push_back(*laser_cloud_global);
    timeQueue.push_back(timeScanCur);
    // 7. pop old cloud
    //// drop clouds that are more than 5 s older than the newest one
    while (!timeQueue.empty())
    {
        if (timeScanCur - timeQueue.front() > 5.0)
        {
            cloudQueue.pop_front();
            timeQueue.pop_front();
        } else {
            break;
        }
    }
    std::lock_guard<std::mutex> lock(mtx_lidar);
    // 8. fuse global cloud
    //// fuse all clouds in the queue
    depthCloud->clear();
    for (int i = 0; i < (int)cloudQueue.size(); ++i)
        *depthCloud += cloudQueue[i];
    // 9. downsample global cloud
    //// downsample the fused cloud
    pcl::PointCloud<PointType>::Ptr depthCloudDS(new pcl::PointCloud<PointType>());
    downSizeFilter.setLeafSize(0.2, 0.2, 0.2);
    downSizeFilter.setInputCloud(depthCloud);
    downSizeFilter.filter(*depthCloudDS);
    *depthCloud = *depthCloudDS;
}
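
The callback relies on several file-scope variables shared with img_callback (mtx_lidar, cloudQueue, timeQueue, depthCloud). Their declarations are not shown in this post; judging from how they are used, they look roughly like the following sketch (the exact types are an assumption):

#include <deque>
#include <mutex>
#include <pcl/point_cloud.h>

std::mutex mtx_lidar;                               // guards depthCloud: img_callback copies it while lidar_callback rebuilds it
std::deque<pcl::PointCloud<PointType>> cloudQueue;  // sliding window of registered clouds (step 6)
std::deque<double> timeQueue;                       // matching timestamps; entries older than 5 s are popped in step 7
pcl::PointCloud<PointType>::Ptr depthCloud(new pcl::PointCloud<PointType>()); // fused cloud handed to DepthRegister::get_depth()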

4. The FeatureTracker Class

4.1 Overview of FeatureTracker members
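
The members used by the functions in this section can be summarized roughly as below. This is a sketch reconstructed from how they are used later in the post, not the verbatim header, so take the exact types with a grain of salt.

class FeatureTracker
{
public:
    cv::Mat mask, fisheye_mask;                      // detection mask / optional fisheye mask
    cv::Mat prev_img, cur_img, forw_img;             // previous, current and latest (forward) frames
    vector<cv::Point2f> n_pts;                       // newly detected corners
    vector<cv::Point2f> prev_pts, cur_pts, forw_pts; // tracked pixel coordinates in the three frames
    vector<cv::Point2f> prev_un_pts, cur_un_pts;     // undistorted normalized-plane coordinates
    vector<cv::Point2f> pts_velocity;                // per-feature velocity on the normalized plane
    vector<int> ids;                                 // feature ids (-1 until assigned by updateID)
    vector<int> track_cnt;                           // number of frames each feature has been tracked
    map<int, cv::Point2f> cur_un_pts_map, prev_un_pts_map; // id -> undistorted point, used for velocity
    camodocal::CameraPtr m_camera;                   // camera model loaded by readIntrinsicParameter
    double cur_time, prev_time;
    static int n_id;                                 // global feature id counter

    FeatureTracker();
    void readImage(const cv::Mat &_img, double _cur_time);
    void setMask();
    void addPoints();
    void rejectWithF();
    bool updateID(unsigned int i);
    void readIntrinsicParameter(const string &calib_file);
    void showUndistortion(const string &name);
    void undistortedPoints();
};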

4.2 Constructor

The constructor is empty; members simply take their default initialization.

4.3 Feature uniformization function setMask()

void FeatureTracker::setMask()
{
    //// initialize the mask according to the camera type
    if(FISHEYE)
        mask = fisheye_mask.clone();
    else
        mask = cv::Mat(ROW, COL, CV_8UC1, cv::Scalar(255));
    // prefer to keep features that are tracked for long time
    //// bundle each tracked point forw_pts with its id and track count, then sort by track count in descending order
    vector<pair<int, pair<cv::Point2f, int>>> cnt_pts_id;
    for (unsigned int i = 0; i < forw_pts.size(); i++)
        cnt_pts_id.push_back(make_pair(track_cnt[i], make_pair(forw_pts[i], ids[i])));
    sort(cnt_pts_id.begin(), cnt_pts_id.end(), [](const pair<int, pair<cv::Point2f, int>> &a, const pair<int, pair<cv::Point2f, int>> &b)
        {
            return a.first > b.first;
        });
    forw_pts.clear();
    ids.clear();
    track_cnt.clear();
    for (auto &it : cnt_pts_id)
    {
        //// keep only features that still fall inside the allowed mask region
        if (mask.at<uchar>(it.second.first) == 255)
        {
            forw_pts.push_back(it.second.first);
            ids.push_back(it.second.second);
            track_cnt.push_back(it.first);
            //// zero out a filled circle of radius MIN_DIST around the kept feature; no new features will be detected in that region
            cv::circle(mask, it.second.first, MIN_DIST, 0, -1);
        }
    }
}

4.4 New feature addition function addPoints()

void FeatureTracker::addPoints()
{
    for (auto &p : n_pts)
    {
        // append to the latest-frame feature array; the id is set to -1 and the track count to 1
        forw_pts.push_back(p);
        ids.push_back(-1);
        track_cnt.push_back(1);
    }
}

4.5 Image tracking function readImage()

void FeatureTracker::readImage(const cv::Mat &_img, double _cur_time)
{
    cv::Mat img;
    TicToc t_r;
    cur_time = _cur_time;
    //// histogram equalization (CLAHE)
    if (EQUALIZE)
    {
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
        TicToc t_c;
        clahe->apply(_img, img);
        ROS_DEBUG("CLAHE costs: %fms", t_c.toc());
    }
    else
        img = _img;
    //// if the latest frame forw_img is empty this is the first image: set the previous, current and latest frames all to img
    if (forw_img.empty())
        prev_img = cur_img = forw_img = img;
    else
        //// otherwise only the latest frame is set to img
        forw_img = img;
    //// clear the latest-frame features and track into the latest frame
    forw_pts.clear();
    if (cur_pts.size() > 0)
    {
        TicToc t_o;
        vector<uchar> status;
        vector<float> err;
        cv::calcOpticalFlowPyrLK(cur_img, forw_img, cur_pts, forw_pts, status, err, cv::Size(21, 21), 3);
        for (int i = 0; i < int(forw_pts.size()); i++)
            if (status[i] && !inBorder(forw_pts[i]))
                status[i] = 0;
        //// remove features that failed to track, according to the status vector
        reduceVector(prev_pts, status);
        reduceVector(cur_pts, status);
        reduceVector(forw_pts, status);
        reduceVector(ids, status);
        reduceVector(cur_un_pts, status);
        reduceVector(track_cnt, status);
        ROS_DEBUG("temporal optical flow costs: %fms", t_o.toc());
    }
    //// increment the track count of every surviving feature
    for (auto &n : track_cnt)
        n++;
    //// if this frame is going to be published
    if (PUB_THIS_FRAME)
    {
        //// estimate the fundamental matrix from the 2D correspondences (epipolar geometry) and reject outliers with RANSAC
        rejectWithF();
        ROS_DEBUG("set mask begins");
        TicToc t_m;
        //// make the feature distribution uniform
        setMask();
        ROS_DEBUG("set mask costs %fms", t_m.toc());
        ROS_DEBUG("detect feature begins");
        TicToc t_t;
        int n_max_cnt = MAX_CNT - static_cast<int>(forw_pts.size());
        //// if fewer than MAX_CNT points are being tracked, detect new features in the latest frame to maintain tracking quality; otherwise detect none
        if (n_max_cnt > 0)
        {
            if(mask.empty())
                cout << "mask is empty " << endl;
            if (mask.type() != CV_8UC1)
                cout << "mask type wrong " << endl;
            if (mask.size() != forw_img.size())
                cout << "wrong size " << endl;
            //// detect the requested number of new features inside the mask; regions that already contain features are excluded, which keeps the distribution uniform
            cv::goodFeaturesToTrack(forw_img, n_pts, MAX_CNT - forw_pts.size(), 0.01, MIN_DIST, mask);
        }
        else
            n_pts.clear();
        ROS_DEBUG("detect feature costs: %fms", t_t.toc());
        ROS_DEBUG("add feature begins");
        TicToc t_a;
        //// add the newly detected features to the tracking vectors
        addPoints();
        ROS_DEBUG("selectFeature costs: %fms", t_a.toc());
    }
    //// shift the previous/current frame data
    prev_img = cur_img;
    prev_pts = cur_pts;
    prev_un_pts = cur_un_pts;
    cur_img = forw_img;
    cur_pts = forw_pts;
    // undistort the tracked features using the camera model and compute their velocities
    // only the tracked points need to be undistorted, not every pixel
    undistortedPoints();
    prev_time = cur_time;
}
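
readImage() (and rejectWithF() below) lean on two small helpers, inBorder() and reduceVector(), that this post does not list. Their behaviour can be sketched as follows; the real helpers in feature_tracker.cpp are plain overloads for cv::Point2f and int rather than a template, so treat this as an illustration.

#include <opencv2/core/core.hpp>
#include <vector>

// keep only the entries whose status flag is non-zero, preserving order
template <typename T>
void reduceVector(std::vector<T> &v, const std::vector<uchar> &status)
{
    int j = 0;
    for (size_t i = 0; i < v.size(); i++)
        if (status[i])
            v[j++] = v[i];
    v.resize(j);
}

// true if the tracked point lies at least BORDER_SIZE pixels inside the image (COL x ROW)
bool inBorder(const cv::Point2f &pt)
{
    const int BORDER_SIZE = 1;
    int img_x = cvRound(pt.x);
    int img_y = cvRound(pt.y);
    return BORDER_SIZE <= img_x && img_x < COL - BORDER_SIZE &&
           BORDER_SIZE <= img_y && img_y < ROW - BORDER_SIZE;
}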

4.6 Fundamental matrix computation function rejectWithF()

void FeatureTracker::rejectWithF()
{
    if (forw_pts.size() >= 8)
    {
        ROS_DEBUG("FM ransac begins");
        TicToc t_f;
        vector<cv::Point2f> un_cur_pts(cur_pts.size()), un_forw_pts(forw_pts.size());
        for (unsigned int i = 0; i < cur_pts.size(); i++)
        {
            Eigen::Vector3d tmp_p;
            //// lift the current-frame and latest-frame points to normalized coordinates, then reproject them through a fixed virtual pinhole camera (focal length FOCAL_LENGTH, principal point at the image center)
            m_camera->liftProjective(Eigen::Vector2d(cur_pts[i].x, cur_pts[i].y), tmp_p);
            tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
            tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
            un_cur_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
            m_camera->liftProjective(Eigen::Vector2d(forw_pts[i].x, forw_pts[i].y), tmp_p);
            tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
            tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
            un_forw_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
        }
        vector<uchar> status;
        //// estimate the fundamental matrix with FM_RANSAC to flag outliers
        cv::findFundamentalMat(un_cur_pts, un_forw_pts, cv::FM_RANSAC, F_THRESHOLD, 0.99, status);
        //// remove the outliers
        int size_a = cur_pts.size();
        reduceVector(prev_pts, status);
        reduceVector(cur_pts, status);
        reduceVector(forw_pts, status);
        reduceVector(cur_un_pts, status);
        reduceVector(ids, status);
        reduceVector(track_cnt, status);
        ROS_DEBUG("FM ransac: %d -> %lu: %f", size_a, forw_pts.size(), 1.0 * forw_pts.size() / size_a);
        ROS_DEBUG("FM ransac costs: %fms", t_f.toc());
    }
}

4.7 Feature ID update function updateID()

bool FeatureTracker::updateID(unsigned int i)
{
    if (i < ids.size())
    {
        //// assign a new global id to features that do not have one yet
        if (ids[i] == -1)
            ids[i] = n_id++;
        return true;
    }
    else
        return false;
}

4.8 Camera intrinsics reading function readIntrinsicParameter()

It simply reads the camera intrinsics; details are omitted here.
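
For reference, the function is essentially a one-liner that hands the YAML calibration file to camodocal's camera factory, which builds the matching camera model (pinhole, MEI, equidistant, ...) and stores it in m_camera. A sketch:

void FeatureTracker::readIntrinsicParameter(const string &calib_file)
{
    ROS_INFO("reading parameter of camera %s", calib_file.c_str());
    // camodocal picks the camera model type from the YAML file and constructs it
    m_camera = camodocal::CameraFactory::instance()->generateCameraFromYamlFile(calib_file);
}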

4.9 Undistortion visualization function showUndistortion()

void FeatureTracker::showUndistortion(const string &name)
{
    //// create the undistorted image with a 600-pixel margin in each dimension
    cv::Mat undistortedImg(ROW + 600, COL + 600, CV_8UC1, cv::Scalar(0));
    vector<Eigen::Vector2d> distortedp, undistortedp;
    for (int i = 0; i < COL; i++)
        for (int j = 0; j < ROW; j++)
        {
            //// lift every pixel to its normalized-plane coordinates (this already includes undistortion)
            Eigen::Vector2d a(i, j);
            Eigen::Vector3d b;
            m_camera->liftProjective(a, b);
            distortedp.push_back(a);
            undistortedp.push_back(Eigen::Vector2d(b.x() / b.z(), b.y() / b.z()));
            //printf("%f,%f->%f,%f,%f\n)\n", a.x(), a.y(), b.x(), b.y(), b.z());
        }
    for (int i = 0; i < int(undistortedp.size()); i++)
    {
        //// reproject the normalized-plane points back to pixel coordinates, which gives the mapping between distorted and undistorted coordinates
        cv::Mat pp(3, 1, CV_32FC1);
        pp.at<float>(0, 0) = undistortedp[i].x() * FOCAL_LENGTH + COL / 2;
        pp.at<float>(1, 0) = undistortedp[i].y() * FOCAL_LENGTH + ROW / 2;
        pp.at<float>(2, 0) = 1.0;
        //cout << trackerData[0].K << endl;
        //printf("%lf %lf\n", p.at<float>(1, 0), p.at<float>(0, 0));
        //printf("%lf %lf\n", pp.at<float>(1, 0), pp.at<float>(0, 0));
        //// use this mapping to fill in the undistorted image, then display it
        if (pp.at<float>(1, 0) + 300 >= 0 && pp.at<float>(1, 0) + 300 < ROW + 600 && pp.at<float>(0, 0) + 300 >= 0 && pp.at<float>(0, 0) + 300 < COL + 600)
        {
            undistortedImg.at<uchar>(pp.at<float>(1, 0) + 300, pp.at<float>(0, 0) + 300) = cur_img.at<uchar>(distortedp[i].y(), distortedp[i].x());
        }
        //ROS_ERROR("(%f %f) -> (%f %f)", distortedp[i].y, distortedp[i].x, pp.at<float>(1, 0), pp.at<float>(0, 0));
    }
    cv::imshow(name, undistortedImg);
    cv::waitKey(0);
}

4.10 Feature undistortion function undistortedPoints()

void FeatureTracker::undistortedPoints()
{
    cur_un_pts.clear();
    cur_un_pts_map.clear();
    //cv::undistortPoints(cur_pts, un_pts, K, cv::Mat());
    //// compute the undistorted normalized-plane coordinates of every feature and update cur_un_pts and the cur_un_pts_map lookup
    for (unsigned int i = 0; i < cur_pts.size(); i++)
    {
        Eigen::Vector2d a(cur_pts[i].x, cur_pts[i].y);
        Eigen::Vector3d b;
        m_camera->liftProjective(a, b);
        cur_un_pts.push_back(cv::Point2f(b.x() / b.z(), b.y() / b.z()));
        cur_un_pts_map.insert(make_pair(ids[i], cv::Point2f(b.x() / b.z(), b.y() / b.z())));
        //printf("cur pts id %d %f %f", ids[i], cur_un_pts[i].x, cur_un_pts[i].y);
    }
    // calculate points velocity
    //// after undistortion, compute each feature's velocity on the normalized plane
    if (!prev_un_pts_map.empty())
    {
        double dt = cur_time - prev_time;
        pts_velocity.clear();
        for (unsigned int i = 0; i < cur_un_pts.size(); i++)
        {
            //// features found in the previous-frame map get a real velocity; features that cannot be found (newly detected ones) get velocity 0
            if (ids[i] != -1)
            {
                std::map<int, cv::Point2f>::iterator it;
                it = prev_un_pts_map.find(ids[i]);
                if (it != prev_un_pts_map.end())
                {
                    double v_x = (cur_un_pts[i].x - it->second.x) / dt;
                    double v_y = (cur_un_pts[i].y - it->second.y) / dt;
                    pts_velocity.push_back(cv::Point2f(v_x, v_y));
                }
                else
                    pts_velocity.push_back(cv::Point2f(0, 0));
            }
            else
                pts_velocity.push_back(cv::Point2f(0, 0));
        }
    }
    else
    {
        //// if the previous-frame map is empty, all velocities are 0
        for (unsigned int i = 0; i < cur_pts.size(); i++)
            pts_velocity.push_back(cv::Point2f(0, 0));
    }
    prev_un_pts_map = cur_un_pts_map;
}

5. The DepthRegister Class

5.1 Overview of DepthRegister members
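
The members referenced by get_depth() below can be sketched roughly as follows. The types, and in particular the num_bins value, are assumptions inferred from their usage in this post.

class DepthRegister
{
public:
    tf::TransformListener listener;         // vins_world -> vins_body_ros lookups
    tf::StampedTransform  transform;

    const int num_bins = 360;               // range-image resolution (value assumed)
    vector<vector<PointType>> pointsArray;  // closest point kept in each range-image bin

    ros::Publisher pub_depth_cloud;         // downsampled local depth cloud
    ros::Publisher pub_depth_feature;       // features with recovered depth
    ros::Publisher pub_depth_image;         // lidar points projected onto the image (visualization)

    DepthRegister(ros::NodeHandle n_in);    // advertises the publishers above
    sensor_msgs::ChannelFloat32 get_depth(const ros::Time& stamp_cur, const cv::Mat& imageCur,
                                          const pcl::PointCloud<PointType>::Ptr& depthCloud,
                                          const camodocal::CameraPtr& camera_model,
                                          const vector<geometry_msgs::Point32>& features_2d);
};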

5.2 Depth search function get_depth()

sensor_msgs::ChannelFloat32 get_depth(const ros::Time& stamp_cur, const cv::Mat& imageCur, 
                                      const pcl::PointCloud<PointType>::Ptr& depthCloud,
                                      const camodocal::CameraPtr& camera_model,
                                      const vector<geometry_msgs::Point32>& features_2d)
{
    // 0.1 initialize depth for return
    //// initialize the depth channel; -1 means "no depth"
    sensor_msgs::ChannelFloat32 depth_of_point;
    depth_of_point.name = "depth";
    depth_of_point.values.resize(features_2d.size(), -1);
    // 0.2  check if depthCloud available
    if (depthCloud->size() == 0)
        return depth_of_point;
    // 0.3 look up transform at current image time
    //// look up the tf transform
    try{
        listener.waitForTransform("vins_world", "vins_body_ros", stamp_cur, ros::Duration(0.01));
        listener.lookupTransform("vins_world", "vins_body_ros", stamp_cur, transform);
    }
    catch (tf::TransformException ex){
        // ROS_ERROR("image no tf");
        return depth_of_point;
    }
    //// convert the tf pose into an affine transform
    double xCur, yCur, zCur, rollCur, pitchCur, yawCur;
    xCur = transform.getOrigin().x();
    yCur = transform.getOrigin().y();
    zCur = transform.getOrigin().z();
    tf::Matrix3x3 m(transform.getRotation());
    m.getRPY(rollCur, pitchCur, yawCur);
    Eigen::Affine3f transNow = pcl::getTransformation(xCur, yCur, zCur, rollCur, pitchCur, yawCur);
    // 0.4 transform cloud from global frame to camera frame
    //// transform the cloud from the world frame back into the camera (body) frame
    pcl::PointCloud<PointType>::Ptr depth_cloud_local(new pcl::PointCloud<PointType>());
    pcl::transformPointCloud(*depthCloud, *depth_cloud_local, transNow.inverse());
    // 0.5 project undistorted normalized (z) 2d features onto a unit sphere
    //// project the 2D features onto the unit sphere and convert the axis convention
    pcl::PointCloud<PointType>::Ptr features_3d_sphere(new pcl::PointCloud<PointType>());
    for (int i = 0; i < (int)features_2d.size(); ++i)
    {
        // normalize 2d feature to a unit sphere
        Eigen::Vector3f feature_cur(features_2d[i].x, features_2d[i].y, features_2d[i].z); // z always equal to 1
        feature_cur.normalize(); 
        // convert to ROS standard
        //// the lidar/ROS frame is Front-Left-Up, the camera frame is Right-Down-Front
        PointType p;
        p.x =  feature_cur(2);
        p.y = -feature_cur(0);
        p.z = -feature_cur(1);
        p.intensity = -1; // intensity will be used to save depth
        features_3d_sphere->push_back(p);
    }
    // 3. project depth cloud on a range image, filter points satcked in the same region
    //// keep only cloud points inside the camera FoV that are closest to the camera; they form the cloud used to look up feature depth
    //// angular resolution of the range image
    float bin_res = 180.0 / (float)num_bins; // currently only cover the space in front of lidar (-90 ~ 90)
    cv::Mat rangeImage = cv::Mat(num_bins, num_bins, CV_32F, cv::Scalar::all(FLT_MAX)); //// range image
    for (int i = 0; i < (int)depth_cloud_local->size(); ++i)
    {
        PointType p = depth_cloud_local->points[i];
        // filter points not in camera view
        //// discard points outside the camera FoV
        if (p.x < 0 || abs(p.y / p.x) > 10 || abs(p.z / p.x) > 10)
            continue;
        // find row id in range image
        //// elevation angle -> row id
        float row_angle = atan2(p.z, sqrt(p.x * p.x + p.y * p.y)) * 180.0 / M_PI + 90.0; // degrees, bottom -> up, 0 -> 360
        int row_id = round(row_angle / bin_res);
        // find column id in range image
        //// azimuth angle -> column id
        float col_angle = atan2(p.x, p.y) * 180.0 / M_PI; // degrees, left -> right, 0 -> 360
        int col_id = round(col_angle / bin_res);
        // id may be out of boundary
        //// discard out-of-range bin ids
        if (row_id < 0 || row_id >= num_bins || col_id < 0 || col_id >= num_bins)
            continue;
        // only keep points that's closer
        //// compute the point's range; if several points fall into the same bin, keep the closest one
        float dist = pointDistance(p);
        if (dist < rangeImage.at<float>(row_id, col_id))
        {
            rangeImage.at<float>(row_id, col_id) = dist;
            pointsArray[row_id][col_id] = p;
        }
    }
    // 4. extract downsampled depth cloud from range image
    //// assemble the cloud used for the depth lookup and publish it
    pcl::PointCloud<PointType>::Ptr depth_cloud_local_filter2(new pcl::PointCloud<PointType>());
    for (int i = 0; i < num_bins; ++i)
        for (int j = 0; j < num_bins; ++j)
            if (rangeImage.at<float>(i, j) != FLT_MAX)
                depth_cloud_local_filter2->push_back(pointsArray[i][j]);
    *depth_cloud_local = *depth_cloud_local_filter2;
    publishCloud(&pub_depth_cloud, depth_cloud_local, stamp_cur, "vins_body_ros");
    // 5. project depth cloud onto a unit sphere
    //// project this cloud onto the unit sphere; intensity stores the range
    pcl::PointCloud<PointType>::Ptr depth_cloud_unit_sphere(new pcl::PointCloud<PointType>());
    for (int i = 0; i < (int)depth_cloud_local->size(); ++i)
    {
        PointType p = depth_cloud_local->points[i];
        float range = pointDistance(p);
        p.x /= range;
        p.y /= range;
        p.z /= range;
        p.intensity = range;
        depth_cloud_unit_sphere->push_back(p);
    }
    //// if there are too few points on the sphere, return immediately
    if (depth_cloud_unit_sphere->size() < 10)
        return depth_of_point;
    // 6. create a kd-tree using the spherical depth cloud
    //// build the kd-tree
    pcl::KdTreeFLANN<PointType>::Ptr kdtree(new pcl::KdTreeFLANN<PointType>());
    kdtree->setInputCloud(depth_cloud_unit_sphere);
    // 7. find the feature depth using kd-tree
    //// for each feature, search the kd-tree for the three cloud points nearest to it on the sphere
    vector<int> pointSearchInd;
    vector<float> pointSearchSqDis;
    float dist_sq_threshold = pow(sin(bin_res / 180.0 * M_PI) * 5.0, 2); //// squared-distance search threshold
    for (int i = 0; i < (int)features_3d_sphere->size(); ++i)
    {
        kdtree->nearestKSearch(features_3d_sphere->points[i], 3, pointSearchInd, pointSearchSqDis);
        //// require three neighbours whose farthest squared distance is still below the threshold
        if (pointSearchInd.size() == 3 && pointSearchSqDis[2] < dist_sq_threshold)
        {
            float r1 = depth_cloud_unit_sphere->points[pointSearchInd[0]].intensity;
            Eigen::Vector3f A(depth_cloud_unit_sphere->points[pointSearchInd[0]].x * r1,
                              depth_cloud_unit_sphere->points[pointSearchInd[0]].y * r1,
                              depth_cloud_unit_sphere->points[pointSearchInd[0]].z * r1);
            float r2 = depth_cloud_unit_sphere->points[pointSearchInd[1]].intensity;
            Eigen::Vector3f B(depth_cloud_unit_sphere->points[pointSearchInd[1]].x * r2,
                              depth_cloud_unit_sphere->points[pointSearchInd[1]].y * r2,
                              depth_cloud_unit_sphere->points[pointSearchInd[1]].z * r2);
            float r3 = depth_cloud_unit_sphere->points[pointSearchInd[2]].intensity;
            Eigen::Vector3f C(depth_cloud_unit_sphere->points[pointSearchInd[2]].x * r3,
                              depth_cloud_unit_sphere->points[pointSearchInd[2]].y * r3,
                              depth_cloud_unit_sphere->points[pointSearchInd[2]].z * r3);
            // https://math.stackexchange.com/questions/100439/determine-where-a-vector-will-intersect-a-plane
            Eigen::Vector3f V(features_3d_sphere->points[i].x,
                              features_3d_sphere->points[i].y,
                              features_3d_sphere->points[i].z);
            //// intersect the ray through the unit-sphere feature V with the plane spanned by A, B and C:
            //// with N = (A-B) x (B-C), solving N . (s*V - A) = 0 gives the scale s = (N . A) / (N . V)
            Eigen::Vector3f N = (A - B).cross(B - C);
            float s = (N(0) * A(0) + N(1) * A(1) + N(2) * A(2)) 
                    / (N(0) * V(0) + N(1) * V(1) + N(2) * V(2));
            float min_depth = min(r1, min(r2, r3));
            float max_depth = max(r1, max(r2, r3));
            //// the three neighbours must not be too far apart in range, and s is clamped to lie between their ranges
            if (max_depth - min_depth > 2 || s <= 0.5)
            {
                continue;
            } else if (s - max_depth > 0) {
                s = max_depth;
            } else if (s - min_depth < 0) {
                s = min_depth;
            }
            // convert feature into cartesian space if depth is available
            //// scale the unit-sphere feature by s; its x coordinate (lidar/ROS forward) then equals the depth
            features_3d_sphere->points[i].x *= s;
            features_3d_sphere->points[i].y *= s;
            features_3d_sphere->points[i].z *= s;
            // the obtained depth here is for unit sphere, VINS estimator needs depth for normalized feature (by value z), (lidar x = camera z)
            features_3d_sphere->points[i].intensity = features_3d_sphere->points[i].x;
        }
    }
    // visualize features in cartesian 3d space (including the feature without depth (default 1))
    //// publish the feature cloud with recovered depth
    publishCloud(&pub_depth_feature, features_3d_sphere, stamp_cur, "vins_body_ros");
    // update depth value for return
    //// only depths greater than 3.0 are written back to the channel
    for (int i = 0; i < (int)features_3d_sphere->size(); ++i)
    {
        if (features_3d_sphere->points[i].intensity > 3.0)
            depth_of_point.values[i] = features_3d_sphere->points[i].intensity;
    }
    // visualization project points on image for visualization (it's slow!)
    //// visualization
    if (pub_depth_image.getNumSubscribers() != 0)
    {
        vector<cv::Point2f> points_2d;
        vector<float> points_distance;
        for (int i = 0; i < (int)depth_cloud_local->size(); ++i)
        {
            // convert points from 3D to 2D
            Eigen::Vector3d p_3d(-depth_cloud_local->points[i].y,
                                 -depth_cloud_local->points[i].z,
                                  depth_cloud_local->points[i].x);
            Eigen::Vector2d p_2d;
            camera_model->spaceToPlane(p_3d, p_2d);
            points_2d.push_back(cv::Point2f(p_2d(0), p_2d(1)));
            points_distance.push_back(pointDistance(depth_cloud_local->points[i]));
        }
        cv::Mat showImage, circleImage;
        cv::cvtColor(imageCur, showImage, cv::COLOR_GRAY2RGB);
        circleImage = showImage.clone();
        for (int i = 0; i < (int)points_2d.size(); ++i)
        {
            float r, g, b;
            getColor(points_distance[i], 50.0, r, g, b);
            cv::circle(circleImage, points_2d[i], 0, cv::Scalar(r, g, b), 5);
        }
        cv::addWeighted(showImage, 1.0, circleImage, 0.7, 0, showImage); // blend camera image and circle image
        cv_bridge::CvImage bridge;
        bridge.image = showImage;
        bridge.encoding = "rgb8";
        sensor_msgs::Image::Ptr imageShowPointer = bridge.toImageMsg();
        imageShowPointer->header.stamp = stamp_cur;
        //// publish the projected depth image and return the per-feature depth channel
        pub_depth_image.publish(imageShowPointer);
    }
    return depth_of_point;
}